3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
30 import functest.ci.tier_builder as tb
31 import functest.core.testcase as testcase
32 from functest.utils.constants import CONST
# __name__ cannot be used here
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the config_functest.yaml file shipped within the package.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
    """The overall result in enumerated type.

    Members are referenced throughout the runner (Result.EX_OK,
    Result.EX_ERROR) but were missing from this enum body, which made
    those references fail; they are restored here.
    """
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK  # successful termination (0 on POSIX)
    EX_ERROR = -1     # any failure
class BlockingTestFailed(Exception):
    """Raised when a test case flagged as blocking does not succeed."""
class TestNotEnabled(Exception):
    """Raised when a requested test case is disabled in testcases.yaml."""
class RunTestsParser(object):
    """Parser to run tests.

    Builds the command-line interface of the runner: a single test/tier
    selector plus two boolean flags. The ``def __init__`` header and the
    ``action="store_true"`` lines were missing from this span and are
    restored so the parser is actually constructible.
    """
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # -t/--test selects one test case or one tier; "all" runs
        # every tier supported by the scenario (dispatched in Runner.main).
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline as a dict
        """
        return vars(self.parser.parse_args(argv))
91 self.executed_test_cases = {}
92 self.overall_result = Result.EX_OK
93 self.clean_flag = True
94 self.report_flag = False
95 self.tiers = tb.TierBuilder(
96 CONST.__getattribute__('INSTALLER_TYPE'),
97 CONST.__getattribute__('DEPLOY_SCENARIO'),
98 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
101 def source_envfile(rc_file):
102 """Source the env file passed as arg"""
103 with open(rc_file, "r") as rcfd:
105 var = (line.rstrip('"\n').replace('export ', '').split(
106 "=") if re.search(r'(.*)=(.*)', line) else None)
107 # The two next lines should be modified as soon as rc_file
108 # conforms with common rules. Be aware that it could induce
109 # issues if value starts with '
111 key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
112 value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
113 os.environ[key] = value
114 setattr(CONST, key, value)
117 def get_dict_by_test(testname):
118 # pylint: disable=bad-continuation,missing-docstring
119 with open(pkg_resources.resource_filename(
120 'functest', 'ci/testcases.yaml')) as tyaml:
121 testcases_yaml = yaml.safe_load(tyaml)
122 for dic_tier in testcases_yaml.get("tiers"):
123 for dic_testcase in dic_tier['testcases']:
124 if dic_testcase['case_name'] == testname:
126 LOGGER.error('Project %s is not defined in testcases.yaml', testname)
130 def get_run_dict(testname):
131 """Obtain the 'run' block of the testcase from testcases.yaml"""
133 dic_testcase = Runner.get_dict_by_test(testname)
135 LOGGER.error("Cannot get %s's config options", testname)
136 elif 'run' in dic_testcase:
137 return dic_testcase['run']
139 except Exception: # pylint: disable=broad-except
140 LOGGER.exception("Cannot get %s's config options", testname)
143 def run_test(self, test):
144 """Run one test case"""
145 if not test.is_enabled():
146 raise TestNotEnabled(
147 "The test case {} is not enabled".format(test.get_name()))
148 LOGGER.info("Running test case '%s'...", test.get_name())
149 result = testcase.TestCase.EX_RUN_ERROR
150 run_dict = self.get_run_dict(test.get_name())
153 module = importlib.import_module(run_dict['module'])
154 cls = getattr(module, run_dict['class'])
155 test_dict = Runner.get_dict_by_test(test.get_name())
156 test_case = cls(**test_dict)
157 self.executed_test_cases[test.get_name()] = test_case
159 kwargs = run_dict['args']
160 test_case.run(**kwargs)
164 test_case.push_to_db()
165 if test.get_project() == "functest":
166 result = test_case.is_successful()
168 result = testcase.TestCase.EX_OK
169 LOGGER.info("Test result:\n\n%s\n", test_case)
173 LOGGER.exception("Cannot import module %s", run_dict['module'])
174 except AttributeError:
175 LOGGER.exception("Cannot get class %s", run_dict['class'])
177 raise Exception("Cannot import the class for the test case.")
180 def run_tier(self, tier):
182 tier_name = tier.get_name()
183 tests = tier.get_tests()
185 LOGGER.info("There are no supported test cases in this tier "
186 "for the given scenario")
187 self.overall_result = Result.EX_ERROR
189 LOGGER.info("Running tier '%s'", tier_name)
192 test_case = self.executed_test_cases[test.get_name()]
193 if test_case.is_successful() != testcase.TestCase.EX_OK:
194 LOGGER.error("The test case '%s' failed.", test.get_name())
195 if test.get_project() == "functest":
196 self.overall_result = Result.EX_ERROR
197 if test.is_blocking():
198 raise BlockingTestFailed(
199 "The test case {} failed and is blocking".format(
201 return self.overall_result
204 """Run all available testcases"""
206 msg = prettytable.PrettyTable(
207 header_style='upper', padding_width=5,
208 field_names=['tiers', 'order', 'CI Loop', 'description',
210 for tier in self.tiers.get_tiers():
211 if (tier.get_tests() and
212 re.search(CONST.__getattribute__('CI_LOOP'),
213 tier.get_ci_loop()) is not None):
214 tiers_to_run.append(tier)
215 msg.add_row([tier.get_name(), tier.get_order(),
217 textwrap.fill(tier.description, width=40),
218 textwrap.fill(' '.join([str(x.get_name(
219 )) for x in tier.get_tests()]), width=40)])
220 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
221 for tier in tiers_to_run:
224 def main(self, **kwargs):
225 """Entry point of class Runner"""
226 if 'noclean' in kwargs:
227 self.clean_flag = not kwargs['noclean']
228 if 'report' in kwargs:
229 self.report_flag = kwargs['report']
232 LOGGER.debug("Sourcing the credential file...")
233 self.source_envfile(getattr(CONST, 'env_file'))
235 LOGGER.debug("Test args: %s", kwargs['test'])
236 if self.tiers.get_tier(kwargs['test']):
237 self.run_tier(self.tiers.get_tier(kwargs['test']))
238 elif self.tiers.get_test(kwargs['test']):
239 result = self.run_test(
240 self.tiers.get_test(kwargs['test']))
241 if result != testcase.TestCase.EX_OK:
242 LOGGER.error("The test case '%s' failed.",
244 self.overall_result = Result.EX_ERROR
245 elif kwargs['test'] == "all":
248 LOGGER.error("Unknown test case or tier '%s', or not "
249 "supported by the given scenario '%s'.",
251 CONST.__getattribute__('DEPLOY_SCENARIO'))
252 LOGGER.debug("Available tiers are:\n\n%s",
254 return Result.EX_ERROR
257 except BlockingTestFailed:
259 except Exception: # pylint: disable=broad-except
260 LOGGER.exception("Failures when running testcase(s)")
261 self.overall_result = Result.EX_ERROR
262 if not self.tiers.get_test(kwargs['test']):
263 self.summary(self.tiers.get_tier(kwargs['test']))
264 LOGGER.info("Execution exit value: %s", self.overall_result)
265 return self.overall_result
267 def summary(self, tier=None):
268 """To generate functest report showing the overall results"""
269 msg = prettytable.PrettyTable(
270 header_style='upper', padding_width=5,
271 field_names=['env var', 'value'])
272 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
274 msg.add_row([env_var, CONST.__getattribute__(env_var)])
275 LOGGER.info("Deployment description:\n\n%s\n", msg)
276 msg = prettytable.PrettyTable(
277 header_style='upper', padding_width=5,
278 field_names=['test case', 'project', 'tier',
279 'duration', 'result'])
280 tiers = [tier] if tier else self.tiers.get_tiers()
281 for each_tier in tiers:
282 for test in each_tier.get_tests():
284 test_case = self.executed_test_cases[test.get_name()]
286 msg.add_row([test.get_name(), test.get_project(),
287 each_tier.get_name(), "00:00", "SKIP"])
289 result = 'PASS' if(test_case.is_successful(
290 ) == test_case.EX_OK) else 'FAIL'
292 [test_case.case_name, test_case.project_name,
293 self.tiers.get_tier_name(test_case.case_name),
294 test_case.get_duration(), result])
295 for test in each_tier.get_skipped_test():
296 msg.add_row([test.get_name(), test.get_project(),
297 each_tier.get_name(), "00:00", "SKIP"])
298 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Entry point of the module.

    Configures logging from the packaged logging.ini, parses the command
    line and delegates to Runner.main, returning its integer exit value.
    The ``def main():`` header and ``runner = Runner()`` line were missing
    from this span and are restored (``runner`` was otherwise undefined).
    """
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value