It mainly avoids mixing input and output: criteria is now an input
set in functest/ci/testcases.yaml, which must then be passed as
__init__() args (to be proposed in an additional change).
It also renames the related TestCase method to check_result().
Change-Id: Ifc3c8e3ea6cde7e3edf7174bed4bf2bf0894e8e3
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
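A rough sketch of the intended split (the criteria kwarg below is an
assumption, since the __init__() change is only proposed here):

    # Minimal sketch only: "criteria" becomes an input (read from
    # functest/ci/testcases.yaml and, per the follow-up change, passed
    # to __init__()), while "result" carries the outcome of run().
    class TestCase(object):
        EX_OK = 0
        EX_TESTCASE_FAILED = 1

        def __init__(self, **kwargs):
            # 'criteria' kwarg is hypothetical until the follow-up lands
            self.criteria = kwargs.get('criteria', 100)
            self.result = ""

        def check_result(self):
            if self.result == 'PASS':
                return TestCase.EX_OK
            return TestCase.EX_TESTCASE_FAILED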
if result == testcase.TestCase.EX_OK:
if GlobalVariables.REPORT_FLAG:
test_case.push_to_db()
- result = test_case.check_criteria()
+ result = test_case.check_result()
except ImportError:
logger.exception("Cannot import module {}".format(
run_dict['module']))
It sets the following attributes required to push the results
to DB:
- * criteria,
+ * result,
* start_time,
* stop_time.
"""
self.start_time = time.time()
exit_code = base.TestCase.EX_RUN_ERROR
- self.criteria = "FAIL"
+ self.result = "FAIL"
try:
if self.execute(**kwargs) == 0:
exit_code = base.TestCase.EX_OK
- self.criteria = 'PASS'
+ self.result = 'PASS'
ft_utils.logger_test_results(
self.project_name, self.case_name,
- self.criteria, self.details)
- self.logger.info("%s %s", self.project_name, self.criteria)
+ self.result, self.details)
+ self.logger.info("%s %s", self.project_name, self.result)
except Exception: # pylint: disable=broad-except
self.logger.exception("%s FAILED", self.project_name)
self.logger.info("Test result is stored in '%s'", self.result_file)
if ((result.errors and len(result.errors) > 0)
or (result.failures and len(result.failures) > 0)):
self.logger.info("%s FAILED" % self.case_name)
- self.criteria = 'FAIL'
+ self.result = 'FAIL'
else:
self.logger.info("%s OK" % self.case_name)
- self.criteria = 'PASS'
+ self.result = 'PASS'
self.details = {}
return exit_code
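The PASS/FAIL mapping above relies on the standard unittest result
object; a self-contained illustration of the same check:

    import unittest

    class Sample(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    suite = unittest.TestLoader().loadTestsFromTestCase(Sample)
    res = unittest.TextTestRunner().run(suite)
    # errors and failures are lists of (test, traceback) pairs;
    # both empty means the suite passed.
    status = 'PASS' if not (res.errors or res.failures) else 'FAIL'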
self.details = {}
self.project_name = kwargs.get('project_name', 'functest')
self.case_name = kwargs.get('case_name', '')
- self.criteria = ""
+ self.result = ""
self.start_time = ""
self.stop_time = ""
- def check_criteria(self):
- """Interpret the results of the test case.
+ def check_result(self):
+ """Interpret the result of the test case.
- It allows getting the results of TestCase. It completes run()
+ It allows getting the result of TestCase. It completes run()
which only returns the execution status.
- It can be overriden if checking criteria is not suitable.
+ It can be overridden if checking the result is not suitable.
Returns:
- TestCase.EX_OK if criteria is 'PASS'.
+ TestCase.EX_OK if result is 'PASS'.
TestCase.EX_TESTCASE_FAILED otherwise.
"""
try:
- assert self.criteria
- if self.criteria == 'PASS':
+ assert self.result
+ if self.result == 'PASS':
return TestCase.EX_OK
except AssertionError:
self.logger.error("Please run test before checking the results")
The new implementation must set the following attributes to
push the results to DB:
- * criteria,
+ * result,
* start_time,
* stop_time.
* project_name,
* case_name,
- * criteria,
+ * result,
* start_time,
* stop_time.
try:
assert self.project_name
assert self.case_name
- assert self.criteria
+ assert self.result
assert self.start_time
assert self.stop_time
if ft_utils.push_results_to_db(
self.project_name, self.case_name, self.start_time,
- self.stop_time, self.criteria, self.details):
+ self.stop_time, self.result, self.details):
self.logger.info("The results were successfully pushed to DB")
return TestCase.EX_OK
else:
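A hedged example of the state push_to_db() expects before it calls the
DB helper (all values are placeholders):

    test.project_name = 'functest'
    test.case_name = 'sample_case'      # placeholder
    test.result = 'PASS'
    test.start_time = 1482624000.0
    test.stop_time = 1482624600.0
    test.details = {}
    # Succeeds only if the results DB is reachable and configured.
    assert test.push_to_db() == TestCase.EX_OK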
def parse_results(self):
exit_code = self.EX_OK
- self.criteria = "PASS"
+ self.result = "PASS"
self.logger.info(self.details)
# The 2 VNF steps must be OK to get a PASS result
if (self.details['vnf']['status'] != "PASS" or
self.details['test_vnf']['status'] != "PASS"):
exit_code = self.EX_RUN_ERROR
- self.criteria = "FAIL"
+ self.result = "FAIL"
return exit_code
def log_results(self):
ft_utils.logger_test_results(self.project_name,
self.case_name,
- self.criteria,
+ self.result,
self.details)
def step_failure(self, error_msg):
'nb tests': total_nb_tests,
'nb success': success_rate}})
- self.criteria = ft_utils.check_success_rate(
+ self.result = ft_utils.check_success_rate(
self.case_name, success_rate)
self.details = payload
logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.criteria))
+ % (self.case_name, success_rate, self.result))
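For context, ft_utils.check_success_rate() turns the computed rate
into the PASS/FAIL string now stored in result; a simplified stand-in
(the real helper reads the per-case threshold from the test case
configuration):

    def check_success_rate(case_name, success_rate, threshold=90):
        # Simplified stand-in for ft_utils.check_success_rate();
        # the real helper looks the threshold up by case_name.
        return 'PASS' if success_rate >= threshold else 'FAIL'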
def _clean_up(self):
if self.volume_type:
except Exception:
success_rate = 0
- self.criteria = ft_utils.check_success_rate(
+ self.result = ft_utils.check_success_rate(
self.case_name, success_rate)
logger.info("Testcase %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.criteria))
+ % (self.case_name, success_rate, self.result))
def run(self):
'''used for functest command line,
except Exception:
success_rate = 0
- self.criteria = ft_utils.check_success_rate(
+ self.result = ft_utils.check_success_rate(
self.case_name, success_rate)
logger.info("Tempest %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.criteria))
+ % (self.case_name, success_rate, self.result))
def run(self):
self.details = {'timestart': start_time,
'duration': duration,
'status': test_status}
- self.criteria = test_status
+ self.result = test_status
@staticmethod
def pMsg(msg):
result = robot.api.ExecutionResult(xml_file)
visitor = ODLResultVisitor()
result.visit(visitor)
- self.criteria = result.suite.status
+ self.result = result.suite.status
self.start_time = timestamp_to_secs(result.suite.starttime)
self.stop_time = timestamp_to_secs(result.suite.endtime)
self.details = {}
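The parsing above uses the standard Robot Framework API; a minimal
stand-alone sketch (the output.xml path is a placeholder):

    import robot.api

    res = robot.api.ExecutionResult('/tmp/output.xml')  # placeholder
    # suite.status is 'PASS' or 'FAIL'; starttime/endtime are Robot
    # timestamps such as '20170101 12:00:00.000'.
    print(res.suite.status, res.suite.starttime, res.suite.endtime)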
result['FUNCvirNetL3']['result'] == "Success"):
status = "PASS"
except:
- logger.error("Unable to set ONOS criteria")
+ logger.error("Unable to set ONOS result")
- self.criteria = status
+ self.result = status
self.details = result
def _run(self):
def main(self, **kwargs):
self.logger.info("AAA VNF onboarding")
self.execute()
- if self.criteria is "PASS":
+ if self.result == "PASS":
return self.EX_OK
else:
return self.EX_RUN_ERROR
self.logger.info("Cloudify IMS VNF onboarding test starting")
self.execute()
self.logger.info("Cloudify IMS VNF onboarding test executed")
- if self.criteria is "PASS":
+ if self.result == "PASS":
return self.EX_OK
else:
return self.EX_RUN_ERROR
self.logger.info("Start to run Opera vIMS VNF onboarding test")
self.execute()
self.logger.info("Opera vIMS VNF onboarding test finished")
- if self.criteria is "PASS":
+ if self.result == "PASS":
return self.EX_OK
else:
return self.EX_RUN_ERROR
self.logger.info("Orchestra IMS VNF onboarding test starting")
self.execute()
self.logger.info("Orchestra IMS VNF onboarding test executed")
- if self.criteria is "PASS":
+ if self.result == "PASS":
return self.EX_OK
else:
return self.EX_RUN_ERROR
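All four VNF onboarding tests end with the same epilogue; if desired,
it could be factored into a shared helper such as this hypothetical
one (not part of this change):

    def _finish(self):
        # Hypothetical helper: map the result string to an exit code.
        return self.EX_OK if self.result == "PASS" else self.EX_RUN_ERROR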
f.close()
def log_results(self):
- if self.criteria == 'PASS':
+ if self.result == 'PASS':
self.set_result_details()
super(VrouterVnf, self).log_results()
def _test_run(self, status, mock_method=None):
self.assertEqual(self.feature.run(cmd=self._cmd), status)
if status == testcase.TestCase.EX_OK:
- self.assertEqual(self.feature.criteria, 'PASS')
+ self.assertEqual(self.feature.result, 'PASS')
else:
- self.assertEqual(self.feature.criteria, 'FAIL')
+ self.assertEqual(self.feature.result, 'FAIL')
mock_method.assert_has_calls([mock.call(), mock.call()])
self.assertEqual(self.feature.start_time, 1)
self.assertEqual(self.feature.stop_time, 2)
project_name=self._project_name)
self.test.start_time = "1"
self.test.stop_time = "2"
- self.test.criteria = "PASS"
+ self.test.result = "PASS"
self.test.details = {"Hello": "World"}
def test_run_unimplemented(self):
self._test_missing_attribute()
- def test_missing_criteria(self):
+ def test_missing_result(self):
- self.test.criteria = None
+ self.test.result = None
self._test_missing_attribute()
def test_missing_start_time(self):
testcase.TestCase.EX_OK)
mock_function.assert_called_once_with(
self._project_name, self._case_name, self.test.start_time,
- self.test.stop_time, self.test.criteria, self.test.details)
+ self.test.stop_time, self.test.result, self.test.details)
@mock.patch('functest.utils.functest_utils.push_results_to_db',
return_value=False)
testcase.TestCase.EX_PUSH_TO_DB_ERROR)
mock_function.assert_called_once_with(
self._project_name, self._case_name, self.test.start_time,
- self.test.stop_time, self.test.criteria, self.test.details)
+ self.test.stop_time, self.test.result, self.test.details)
@mock.patch('functest.utils.functest_utils.push_results_to_db',
return_value=True)
testcase.TestCase.EX_OK)
mock_function.assert_called_once_with(
self._project_name, self._case_name, self.test.start_time,
- self.test.stop_time, self.test.criteria, self.test.details)
+ self.test.stop_time, self.test.result, self.test.details)
- def test_check_criteria_missing(self):
- self.test.criteria = None
- self.assertEqual(self.test.check_criteria(),
+ def test_check_result_missing(self):
+ self.test.result = None
+ self.assertEqual(self.test.check_result(),
testcase.TestCase.EX_TESTCASE_FAILED)
- def test_check_criteria_failed(self):
- self.test.criteria = 'FAILED'
- self.assertEqual(self.test.check_criteria(),
+ def test_check_result_failed(self):
+ self.test.result = 'FAILED'
+ self.assertEqual(self.test.check_result(),
testcase.TestCase.EX_TESTCASE_FAILED)
- def test_check_criteria_pass(self):
- self.test.criteria = 'PASS'
- self.assertEqual(self.test.check_criteria(),
+ def test_check_result_pass(self):
+ self.test.result = 'PASS'
+ self.assertEqual(self.test.check_result(),
testcase.TestCase.EX_OK)
self.test.project = "functest"
self.test.start_time = "1"
self.test.stop_time = "5"
- self.test.criteria = ""
+ self.test.result = ""
self.test.details = {"orchestrator": {"status": "PASS",
"result": "",
"duration": 20},
with mock.patch('robot.api.ExecutionResult',
return_value=mock.Mock(suite=suite)):
self.test.parse_results()
- self.assertEqual(self.test.criteria, config['status'])
+ self.assertEqual(self.test.result, config['status'])
self.assertEqual(self.test.start_time,
timestamp_to_secs(config['starttime']))
self.assertEqual(self.test.stop_time,
self.db_url = 'test_db_url'
self.success_rate = 2.0
self.criteria = 'test_criteria==2.0'
+ self.result = 'PASS'
self.start_date = 1482624000
self.stop_date = 1482624000
self.start_time = time.time()
as mock_logger_error:
functest_utils.push_results_to_db(self.project, self.case_name,
self.start_date, self.stop_date,
- self.criteria, self.details)
+ self.result, self.details)
mock_logger_error.assert_called_once_with("Please set env var: " +
str("\'" + env_var +
"\'"))
push_results_to_db(self.project, self.case_name,
self.start_date,
self.stop_date,
- self.criteria, self.details))
+ self.result, self.details))
mock_logger_error.assert_called_once_with(test_utils.
RegexMatch("Pushing "
"Result to"
push_results_to_db(self.project, self.case_name,
self.start_date,
self.stop_date,
- self.criteria, self.details))
+ self.result, self.details))
self.assertTrue(mock_logger_error.called)
def test_push_results_to_db_default(self):
push_results_to_db(self.project, self.case_name,
self.start_date,
self.stop_date,
- self.criteria, self.details))
+ self.result, self.details))
readline = 0
test_ip = ['10.1.23.4', '10.1.14.15', '10.1.16.15']
@decorators.can_dump_request_to_file
def push_results_to_db(project, case_name,
- start_date, stop_date, criteria, details):
+ start_date, stop_date, result, details):
"""
POST results to the Result target DB
"""
params = {"project_name": project, "case_name": case_name,
"pod_name": pod_name, "installer": installer,
- "version": version, "scenario": scenario, "criteria": criteria,
+ "version": version, "scenario": scenario, "criteria": result,
"build_tag": build_tag, "start_date": test_start,
"stop_date": test_stop, "details": details}
'pod': pod_name,
'v': version,
's': scenario,
- 'c': criteria,
+ 'c': result,
't': build_tag,
'd': details,
'error': e
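For reference, a hedged example of calling the renamed signature
(dates and case name are placeholders):

    push_results_to_db(project='functest',
                       case_name='sample_case',   # placeholder
                       start_date=1482624000,
                       stop_date=1482624600,
                       result='PASS',
                       details={})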