Change-Id: Ib1a99cbeeb66d4c51eedb56d25d317b92602d51d
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
#
import datetime
import jinja2
+import logging
import os
import requests
import sys
import time
import yaml

# local helpers (module names inferred from the conf./utils. usage below)
import reportingConf as conf
import reportingUtils as utils
import testCase as tc
import scenarioResult as sr
-testCases4Validation = []
+# Logger
+logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
+logger = logging.getLogger()
+
+fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
+fileHandler.setFormatter(logFormatter)
+logger.addHandler(fileHandler)
+
+consoleHandler = logging.StreamHandler()
+consoleHandler.setFormatter(logFormatter)
+logger.addHandler(consoleHandler)
+logger.setLevel(conf.LOG_LEVEL)
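+# both handlers share the same formatter; LOG_FILE and LOG_LEVEL come from
+# reportingConf (see the configuration additions further down in this change)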
+
+# Initialization
+testValid = []
otherTestCases = []
# init just tempest to get the list of scenarios
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
-# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
-cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
response = requests.get(cf)
functest_yaml_config = yaml.load(response.text)
-print "****************************************"
-print "* Generating reporting..... *"
-print ("* Data retention = %s days *" % conf.PERIOD)
-print "* *"
-print "****************************************"
+logger.info("****************************************")
+logger.info("* Generating reporting..... *")
+logger.info("* Data retention = %s days *" % conf.PERIOD)
+logger.info("* *")
+logger.info("****************************************")
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
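+# Tier ordering drives the classification below: tiers 1 and 2 feed testValid
+# (scenario validation), tier 3 cases are also kept for validation but
+# registered under their own name rather than under "functest", and higher
+# tiers go to otherTestCases; blacklisted cases are skipped in every branch.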
for tier in config_tiers:
if tier['order'] > 0 and tier['order'] < 3:
for case in tier['testcases']:
- testCases4Validation.append(tc.TestCase(case['name'],
- "functest",
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ testValid.append(tc.TestCase(case['name'],
+ "functest",
+ case['dependencies']))
elif tier['order'] == 3:
for case in tier['testcases']:
- testCases4Validation.append(tc.TestCase(case['name'],
- case['name'],
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ testValid.append(tc.TestCase(case['name'],
+ case['name'],
+ case['dependencies']))
elif tier['order'] > 3:
for case in tier['testcases']:
- otherTestCases.append(tc.TestCase(case['name'],
- "functest",
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ otherTestCases.append(tc.TestCase(case['name'],
+ "functest",
+ case['dependencies']))
# For all the versions
for version in conf.versions:
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
- print ("---------------------------------")
- print ("installer %s, version %s, scenario %s:" %
- (installer, version, s))
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
# 1) Manage the test cases for the scenario validation
# concretely Tiers 0-3
- for test_case in testCases4Validation:
+ for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- print ("testcase %s is %s" % (test_case.getName(),
- test_case.isRunnable))
+ logger.debug("testcase %s is %s" % (test_case.getName(),
+ test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
project = test_case.getProject()
nb_test_runnable_for_this_scenario += 1
- print (" Searching results for case %s " %
- (dbName))
+ logger.info(" Searching results for case %s " %
+ (dbName))
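+ # getResult() (defined in reportingUtils) is expected to return a small
+ # integer score based on the most recent runs (0-3 in the scoring logic
+ # shown further down)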
result = utils.getResult(dbName, installer, s, version)
- print " >>>> Test result=" + str(result)
+ logger.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
for test_case in otherTestCases:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- print ("testcase %s is %s" % (test_case.getName(),
- test_case.isRunnable))
+ logger.info("testcase %s is %s" %
+ (test_case.getName(), test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
project = test_case.getProject()
- print (" Searching results for case %s " %
- (dbName))
+ logger.info(" Searching results for case %s " %
+ (dbName))
result = utils.getResult(dbName, installer, s, version)
test_case.setCriteria(result)
test_case.setIsRunnable(True)
items[s] = testCases2BeDisplayed
except:
- print ("Error: installer %s, version %s, scenario %s" %
- (installer, version, s))
- print "No data available , error %s " % (sys.exc_info()[0])
+ logger.error("Error: installer %s, version %s, scenario %s" %
+ (installer, version, s))
+ logger.error("No data available: %s " % (sys.exc_info()[0]))
# **********************************************
# Evaluate the results for scenario validation
s_score = str(scenario_score) + "/" + str(scenario_criteria)
s_status = "KO"
if scenario_score < scenario_criteria:
- print (">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
+ logger.info(">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
- print ">>>>> scenario OK, save the information"
+ logger.info(">>>>> scenario OK, save the information")
s_status = "OK"
path_validation_file = ("./release/" + version +
"/validated_scenario_history.txt")
s_status = "OK"
path_validation_file = ("./release/" + version +
"/validated_scenario_history.txt")
f.write(info)
scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
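+ # ScenarioResult keeps the status (OK/KO) and the score string; both are
+ # later used when rendering the jinja2 template loaded below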
- print "--------------------------"
+ logger.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(os.path.dirname
(os.path.abspath
#
# ****************************************************
installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["compass"]
+# installers = ["apex"]
+# list of test cases declared in testcases.yaml but that must not be
+# taken into account for the scoring
+blacklist = ["odl", "ovno", "security_scan"]
# versions = ["brahmaputra", "master"]
versions = ["master"]
PERIOD = 10
MAX_SCENARIO_CRITERIA = 18
# versions = ["brahmaputra", "master"]
versions = ["master"]
PERIOD = 10
MAX_SCENARIO_CRITERIA = 18
+# get the last 5 test results to determine the success criteria
+NB_TESTS = 5
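+# keeping NB_TESTS above 4 gives the last-4-runs check in reportingUtils
+# enough samples to work with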
URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
+TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
+LOG_LEVEL = "INFO"
+LOG_FILE = "reporting.log"
# "&period=30&installer=" + installer
url = (reportingConf.URL_BASE + "?case=" + case +
"&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
# "&period=30&installer=" + installer
url = (reportingConf.URL_BASE + "?case=" + case +
"&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
- "&scenario=" + scenario + "&version=" + version)
+ "&scenario=" + scenario + "&version=" + version +
+ "&last=" + str(reportingConf.NB_TESTS))
request = Request(url)
try:
# print "nb of results:" + str(len(test_results))
for r in test_results:
# print "nb of results:" + str(len(test_results))
for r in test_results:
- # print r["creation_date"]
+ # print r["start_date"]
# print r["criteria"]
scenario_results.append({r["start_date"]: r["criteria"]})
# sort results
# print r["criteria"]
scenario_results.append({r["start_date"]: r["criteria"]})
# sort results
# 0: 0% success, not passing
test_result_indicator = 0
nbTestOk = getNbtestOk(scenario_results)
- # print "Nb test OK:"+ str(nbTestOk)
+ # print "Nb test OK (last 10 days):"+ str(nbTestOk)
# check that we have at least 4 runs
if nbTestOk < 1:
test_result_indicator = 0
# Test the last 4 run
if (len(scenario_results) > 3):
last4runResults = scenario_results[-4:]
- if getNbtestOk(last4runResults):
+ nbTestOkLast4 = getNbtestOk(last4runResults)
+ # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
+ if nbTestOkLast4 > 3:
test_result_indicator = 3
else:
test_result_indicator = 2
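+ # Scoring summary, as implemented above: 0 when no successful run was found,
+ # 3 when the last four runs all passed, 2 when there were successes but the
+ # last four did not all pass; other values may come from code not shown here.
+ #
+ # getNbtestOk() is defined elsewhere in reportingUtils; a minimal sketch of
+ # what it is assumed to do (the passing criteria literal is an assumption):
+ #
+ # def getNbtestOk(scenario_results):
+ #     nb_test_ok = 0
+ #     for record in scenario_results:
+ #         for date, criteria in record.items():
+ #             if criteria == "SUCCESS":  # assumed passing value
+ #                 nb_test_ok += 1
+ #     return nb_test_ok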