X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=utils%2Ftest%2Freporting%2Ffunctest%2Freporting-status.py;h=622c375cc230645b4f2d25d9badd3fb299fa3770;hb=30e5e4d278cbe37e32c76d8351c563173f1b1901;hp=9271717bb56745d6a080898382f58fe53cfa6f6a;hpb=e633e433df62fe13838052f7e0c3d23645368fd7;p=releng.git diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py old mode 100644 new mode 100755 index 9271717bb..df5632335 --- a/utils/test/reporting/functest/reporting-status.py +++ b/utils/test/reporting/functest/reporting-status.py @@ -1,363 +1,278 @@ -from urllib2 import Request, urlopen, URLError +#!/usr/bin/python +# +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# import datetime -import json import jinja2 import os -import re import requests import sys import time import yaml -# Declaration of the variables -functest_test_list = ['vPing', 'vPing_userdata', - 'Tempest', 'Rally', - 'ODL', 'ONOS', 'vIMS'] -# functest_test_list = ['vPing'] -companion_test_list = ['doctor/doctor-notification', 'promise/promise'] -# companion_test_list = [] -installers = ["apex", "compass", "fuel", "joid"] -# installers = ["fuel"] -versions = ["brahmaputra", "master"] -# versions = ["master"] -PERIOD = 10 -MAX_SCENARIO_CRITERIA = 18 +import testCase as tc +import scenarioResult as sr -# Correspondance between the name of the test case and the name in the DB -# ideally we should modify the DB to avoid such interface.... -# ' -# I know it is uggly... -test_match_matrix = {'healthcheck': 'healthcheck', - 'vPing': 'vping_ssh', - 'vPing_userdata': 'vping_userdata', - 'ODL': 'odl', - 'ONOS': 'onos', - 'Tempest': 'tempest', - 'Rally': 'rally', - 'vIMS': 'vims', - 'doctor-notification': 'doctor', - 'promise': 'promise'} +# manage conf +import utils.reporting_utils as rp_utils +# Logger +logger = rp_utils.getLogger("Functest-Status") -class TestCase(object): - def __init__(self, name, project, criteria=-1, isRunnable=True): - self.name = name - self.project = project - self.criteria = criteria - self.isRunnable = isRunnable - - def getName(self): - return self.name - - def getProject(self): - return self.project - - def getCriteria(self): - return self.criteria - - def setCriteria(self, criteria): - self.criteria = criteria - - def setIsRunnable(self, isRunnable): - self.isRunnable = isRunnable - - def checkRunnable(self, installer, scenario, config): - # Re-use Functest declaration - # Retrieve Functest configuration file functest_config.yaml - is_runnable = True - config_test = "" - TEST_ENV = functest_yaml_config.get("test-dependencies") - - # print " *********************** " - # print TEST_ENV - # print " ---------------------- " - # print "case = " + self.name - # print "installer = " + installer - # print "scenario = " + scenario - # print "project = " + self.project - - # Retrieve test constraints - case_name_formated = test_match_matrix[self.name] - - try: - config_test = TEST_ENV[self.project][case_name_formated] - except KeyError: - # if not defined in dependencies => no dependencies - config_test = TEST_ENV[case_name_formated] - except Exception, e: - print "Error [getTestEnv]:", e - - # Retrieve test execution param - test_execution_context = {"installer": installer, - "scenario": scenario} - # By default we assume that all the tests are always runnable... 
- # if test_env not empty => dependencies to be checked - if config_test is not None and len(config_test) > 0: - # possible criteria = ["installer", "scenario"] - # consider test criteria from config file - # compare towards CI env through CI en variable - for criteria in config_test: - if re.search(config_test[criteria], - test_execution_context[criteria]) is None: - # print "Test "+ test + " cannot be run on the environment" - is_runnable = False - # print is_runnable - self.isRunnable = is_runnable - - -class ScenarioResult(object): - def __init__(self, status, score=0): - self.status = status - self.score = score - - def getStatus(self): - return self.status - - def getScore(self): - return self.score - -# ***************************************************************************** - - -def getApiResults(case, installer, scenario, version): - case = case.getName() - results = json.dumps([]) - # to remove proxy (to be removed at the end for local test only) - # proxy_handler = urllib2.ProxyHandler({}) - # opener = urllib2.build_opener(proxy_handler) - # urllib2.install_opener(opener) - # url = "http://127.0.0.1:8000/results?case=" + case + \ - # "&period=30&installer=" + installer - url = ("http://testresults.opnfv.org/testapi/results?case=" + case + - "&period=" + str(PERIOD) + "&installer=" + installer + - "&scenario=" + scenario + "&version=" + version) - request = Request(url) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'No kittez. Got an error code:', e - - return results - - -def getScenarios(case, installer, version): - - case = case.getName() - url = "http://testresults.opnfv.org/testapi/results?case=" + case + \ - "&period=" + str(PERIOD) + "&installer=" + installer + \ - "&version=" + version - request = Request(url) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'Got an error code:', e - - test_results = results['test_results'] - - if test_results is not None: - test_results.reverse() - - scenario_results = {} - - for r in test_results: - # Retrieve all the scenarios per installer - if not r['scenario'] in scenario_results.keys(): - scenario_results[r['scenario']] = [] - scenario_results[r['scenario']].append(r) - - return scenario_results - - -def getScenarioStats(scenario_results): - scenario_stats = {} - for k, v in scenario_results.iteritems(): - scenario_stats[k] = len(v) - - return scenario_stats - - -def getNbtestOk(results): - nb_test_ok = 0 - for r in results: - for k, v in r.iteritems(): - try: - if "passed" in v: - nb_test_ok += 1 - except: - print "Cannot retrieve test status" - return nb_test_ok - - -def getResult(testCase, installer, scenario, version): - - # retrieve raw results - results = getApiResults(testCase, installer, scenario, version) - # let's concentrate on test results only - test_results = results['test_results'] - - # if results found, analyze them - if test_results is not None: - test_results.reverse() - - scenario_results = [] - - # print " ---------------- " - # print test_results - # print " ---------------- " - # print "nb of results:" + str(len(test_results)) - - for r in test_results: - # print r["creation_date"] - # print r["criteria"] - scenario_results.append({r["creation_date"]: r["criteria"]}) - # sort results - scenario_results.sort() - # 4 levels for the results - # 3: 4+ consecutive runs passing the success criteria - # 2: <4 successful consecutive runs but passing the criteria - # 1: close 
to pass the success criteria - # 0: 0% success, not passing - test_result_indicator = 0 - nbTestOk = getNbtestOk(scenario_results) - # print "Nb test OK:"+ str(nbTestOk) - # check that we have at least 4 runs - if nbTestOk < 1: - test_result_indicator = 0 - elif nbTestOk < 2: - test_result_indicator = 1 - else: - # Test the last 4 run - if (len(scenario_results) > 3): - last4runResults = scenario_results[-4:] - if getNbtestOk(last4runResults): - test_result_indicator = 3 - else: - test_result_indicator = 2 - else: - test_result_indicator = 2 - print " >>>> Test indicator:" + str(test_result_indicator) - return test_result_indicator - -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** +# Initialization +testValid = [] +otherTestCases = [] +reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") # init just tempest to get the list of scenarios # as all the scenarios run Tempest -tempest = TestCase("Tempest", "functest", -1) +tempest = tc.TestCase("tempest_smoke_serial", "functest", -1) # Retrieve the Functest configuration to detect which tests are relevant # according to the installer, scenario -cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml" -response = requests.get(cf) -functest_yaml_config = yaml.load(response.text) +cf = rp_utils.get_config('functest.test_conf') +period = rp_utils.get_config('general.period') +versions = rp_utils.get_config('general.versions') +installers = rp_utils.get_config('general.installers') +blacklist = rp_utils.get_config('functest.blacklist') +log_level = rp_utils.get_config('general.log.log_level') +exclude_noha = rp_utils.get_config('functest.exclude_noha') +exclude_virtual = rp_utils.get_config('functest.exclude_virtual') -print "****************************************" -print "* Generating reporting..... 
*" -print ("* Data retention = %s days *" % PERIOD) -print "* *" -print "****************************************" +response = requests.get(cf) +functest_yaml_config = yaml.safe_load(response.text) + +logger.info("*******************************************") +logger.info("* *") +logger.info("* Generating reporting scenario status *") +logger.info("* Data retention: %s days *" % period) +logger.info("* Log level: %s *" % log_level) +logger.info("* *") +logger.info("* Virtual PODs exluded: %s *" % exclude_virtual) +logger.info("* NOHA scenarios excluded: %s *" % exclude_noha) +logger.info("* *") +logger.info("*******************************************") + +# Retrieve test cases of Tier 1 (smoke) +config_tiers = functest_yaml_config.get("tiers") + +# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features) +# to validate scenarios +# Tier > 2 are not used to validate scenarios but we display the results anyway +# tricky thing for the API as some tests are Functest tests +# other tests are declared directly in the feature projects +for tier in config_tiers: + if tier['order'] >= 0 and tier['order'] < 2: + for case in tier['testcases']: + if case['name'] not in blacklist: + testValid.append(tc.TestCase(case['name'], + "functest", + case['dependencies'])) + elif tier['order'] == 2: + for case in tier['testcases']: + if case['name'] not in blacklist: + testValid.append(tc.TestCase(case['name'], + case['name'], + case['dependencies'])) + elif tier['order'] > 2: + for case in tier['testcases']: + if case['name'] not in blacklist: + otherTestCases.append(tc.TestCase(case['name'], + "functest", + case['dependencies'])) + +logger.debug("Functest reporting start") # For all the versions for version in versions: # For all the installers for installer in installers: # get scenarios - scenario_results = getScenarios(tempest, installer, version) - scenario_stats = getScenarioStats(scenario_results) + scenario_results = rp_utils.getScenarios(tempest, installer, version) + scenario_stats = rp_utils.getScenarioStats(scenario_results) items = {} scenario_result_criteria = {} + scenario_file_name = ("./display/" + version + + "/functest/scenario_history.txt") + # initiate scenario file if it does not exist + if not os.path.isfile(scenario_file_name): + with open(scenario_file_name, "a") as my_file: + logger.debug("Create scenario file: %s" % scenario_file_name) + my_file.write("date,scenario,installer,detail,score\n") # For all the scenarios get results for s, s_result in scenario_results.items(): - testCases = [] + logger.info("---------------------------------") + logger.info("installer %s, version %s, scenario %s:" % + (installer, version, s)) + logger.debug("Scenario results: %s" % s_result) + # Green or Red light for a given scenario nb_test_runnable_for_this_scenario = 0 scenario_score = 0 - - # For each scenario declare the test cases - # Functest cases - for test_case in functest_test_list: - testCases.append(TestCase(test_case, "functest")) - - # project/case - for test_case in companion_test_list: - test_split = test_case.split("/") - test_project = test_split[0] - test_case = test_split[1] - testCases.append(TestCase(test_case, test_project)) - + # url of the last jenkins log corresponding to a given + # scenario + s_url = "" + if len(s_result) > 0: + build_tag = s_result[len(s_result)-1]['build_tag'] + logger.debug("Build tag: %s" % build_tag) + s_url = s_url = rp_utils.getJenkinsUrl(build_tag) + logger.info("last jenkins url: %s" % s_url) + testCases2BeDisplayed = [] # Check if test case is 
runnable / installer, scenario
+            # for the test case used for Scenario validation
             try:
-                for test_case in testCases:
-                    test_case.checkRunnable(installer, s, functest_yaml_config)
-                    # print "testcase %s is %s" % (test_case.getName(),
-                    #                              test_case.isRunnable)
-                print ("installer %s, version %s, scenario %s:" %
-                       (installer, version, s))
-                for testCase in testCases:
+                # 1) Manage the test cases for the scenario validation
+                # concretely Tiers 0-2
+                for test_case in testValid:
+                    test_case.checkRunnable(installer, s,
+                                            test_case.getConstraints())
+                    logger.debug("testcase %s (%s) is %s" %
+                                 (test_case.getDisplayName(),
+                                  test_case.getName(),
+                                  test_case.isRunnable))
                     time.sleep(1)
-                    if testCase.isRunnable:
+                    if test_case.isRunnable:
+                        dbName = test_case.getDbName()
+                        name = test_case.getName()
+                        displayName = test_case.getDisplayName()
+                        project = test_case.getProject()
                         nb_test_runnable_for_this_scenario += 1
-                        print (" Searching results for case %s " %
-                               (testCase.getName()))
-                        result = getResult(testCase, installer, s, version)
-                        testCase.setCriteria(result)
-                items[s] = testCases
+                        logger.info(" Searching results for case %s " %
+                                    (displayName))
+                        result = rp_utils.getResult(dbName, installer,
+                                                    s, version)
+                        # if no result set the value to 0
+                        if result < 0:
+                            result = 0
+                        logger.info(" >>>> Test score = " + str(result))
+                        test_case.setCriteria(result)
+                        test_case.setIsRunnable(True)
+                        testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                 project,
+                                                                 "",
+                                                                 result,
+                                                                 True,
+                                                                 1))
                         scenario_score = scenario_score + result
+
+                # 2) Manage the test cases for the scenario qualification
+                # concretely Tiers > 2
+                for test_case in otherTestCases:
+                    test_case.checkRunnable(installer, s,
+                                            test_case.getConstraints())
+                    logger.debug("testcase %s (%s) is %s" %
+                                 (test_case.getDisplayName(),
+                                  test_case.getName(),
+                                  test_case.isRunnable))
+                    time.sleep(1)
+                    if test_case.isRunnable:
+                        dbName = test_case.getDbName()
+                        name = test_case.getName()
+                        displayName = test_case.getDisplayName()
+                        project = test_case.getProject()
+                        logger.info(" Searching results for case %s " %
+                                    (displayName))
+                        result = rp_utils.getResult(dbName, installer,
+                                                    s, version)
+                        # at least 1 result for the test
+                        if result > -1:
+                            test_case.setCriteria(result)
+                            test_case.setIsRunnable(True)
+                            testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                     project,
+                                                                     "",
+                                                                     result,
+                                                                     True,
+                                                                     4))
+                        else:
+                            logger.debug("No results found")
+
+                items[s] = testCases2BeDisplayed
             except:
-                print ("installer %s, version %s, scenario %s" %
-                       (installer, version, s))
-                print "No data available , error %s " % (sys.exc_info()[0])
+                logger.error("Error: installer %s, version %s, scenario %s" %
+                             (installer, version, s))
+                logger.error("No data available: %s " % (sys.exc_info()[0]))
 
+            # **********************************************
+            # Evaluate the results for scenario validation
+            # **********************************************
             # the validation criteria = nb runnable tests x 3
+            # because each test case = 0,1,2 or 3
             scenario_criteria = nb_test_runnable_for_this_scenario * 3
             # if 0 runnable tests set criteria at a high value
             if scenario_criteria < 1:
-                scenario_criteria = MAX_SCENARIO_CRITERIA
+                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
 
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
+            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
+                                                          scenario_criteria)
+
             s_status = "KO"
             if scenario_score < scenario_criteria:
-                print (">>>> scenario not OK, score = %s/%s" %
-                       (scenario_score, scenario_criteria))
+                logger.info(">>>> scenario not OK, 
score = %s/%s" %
+                            (scenario_score, scenario_criteria))
                 s_status = "KO"
             else:
-                print ">>>>> scenario OK, save the information"
+                logger.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
-                with open("./release/" + version +
-                          "/validated_scenario_history.txt", "a") as f:
+                path_validation_file = ("./display/" + version +
+                                        "/functest/" +
+                                        "validated_scenario_history.txt")
+                with open(path_validation_file, "a") as f:
                     time_format = "%Y-%m-%d %H:%M"
                     info = (datetime.datetime.now().strftime(time_format) +
                             ";" + installer + ";" + s + "\n")
                     f.write(info)
 
-            scenario_result_criteria[s] = ScenarioResult(s_status, s_score)
-            print "--------------------------"
+            # Save daily results in a file
+            with open(scenario_file_name, "a") as f:
+                info = (reportingDate + "," + s + "," + installer +
+                        "," + s_score + "," +
+                        str(round(s_score_percent)) + "\n")
+                f.write(info)
+
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+                                                            s_score,
+                                                            s_score_percent,
+                                                            s_url)
+            logger.info("--------------------------")
 
-        templateLoader = jinja2.FileSystemLoader(os.path.dirname
-                                                 (os.path.abspath
-                                                  (__file__)))
-        templateEnv = jinja2.Environment(loader=templateLoader)
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(
+            loader=templateLoader, autoescape=True)
 
-        TEMPLATE_FILE = "./template/index-status-tmpl.html"
+        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_stats=scenario_stats,
                                      scenario_results=scenario_result_criteria,
                                      items=items,
                                      installer=installer,
-                                     period=PERIOD,
-                                     version=version)
+                                     period=period,
+                                     version=version,
+                                     date=reportingDate)
 
-        with open("./release/" + version +
-                  "/index-status-" + installer + ".html", "wb") as fh:
+        with open("./display/" + version +
+                  "/functest/status-" + installer + ".html", "wb") as fh:
             fh.write(outputText)
+
+        logger.info("Manage export CSV & PDF")
+        rp_utils.export_csv(scenario_file_name, installer, version)
+        logger.info("CSV generated...")
+
+        # Generate outputs for export
+        # pdf
+        # TODO Change once web site updated...use the current one
+        # to test pdf production
+        url_pdf = rp_utils.get_config('general.url')
+        pdf_path = ("./display/" + version +
+                    "/functest/status-" + installer + ".html")
+        pdf_doc_name = ("./display/" + version +
+                        "/functest/status-" + installer + ".pdf")
+        rp_utils.export_pdf(pdf_path, pdf_doc_name)
+        logger.info("PDF generated...")
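
Note on the scoring rule introduced by this change: each runnable test case contributes an integer score from 0 to 3, the scenario validation criteria is the number of runnable tests multiplied by 3 (falling back to 50, the MAX_SCENARIO_CRITERIA, when no test is runnable), and the scenario is flagged OK only when the accumulated score reaches that criteria. The minimal standalone sketch below illustrates that rule; the function name, the percentage computation and the example values are illustrative assumptions, not part of reporting-status.py or reporting_utils.

    # Illustrative sketch only (names are hypothetical, not from the script)
    def evaluate_scenario(test_scores, max_criteria=50):
        """Compute (status, score string, percent) from per-test scores (0-3)."""
        scenario_score = sum(test_scores)
        # validation criteria = nb runnable tests x 3, as in the script above
        scenario_criteria = len(test_scores) * 3
        if scenario_criteria < 1:
            # no runnable test: fall back to a high criteria value
            scenario_criteria = max_criteria
        s_score = "%s/%s" % (scenario_score, scenario_criteria)
        # assumed to mirror what rp_utils.getScenarioPercent() returns
        s_score_percent = 100.0 * scenario_score / scenario_criteria
        s_status = "OK" if scenario_score >= scenario_criteria else "KO"
        return s_status, s_score, s_score_percent

    # Example: 4 runnable tests, three fully passing and one partially passing
    print(evaluate_scenario([3, 3, 3, 2]))  # -> ('KO', '11/12', 91.66...)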