# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
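
"""Run the Functest testcases.

Parse functest/ci/testcases.yaml to know which testcases or tiers can
be run, execute the common operations on every testcase (source the
OpenStack credentials, run, optionally push results to the DB) and
return a suitable exit status.
"""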

import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable
import yaml

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')

CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
CONFIG_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_patch.yaml')
CONFIG_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_aarch64_patch.yaml')
# read the pod architecture from the environment, if any
pod_arch = os.getenv("POD_ARCH", None)
arch_filter = ['aarch64']


class Result(enum.Enum):
    """Exit statuses returned by Runner.main()."""
    # os.EX_OK is 0 on POSIX; any non-zero value signals failure
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Raised when a blocking testcase fails."""


class TestNotEnabled(Exception):
    """Raised when a disabled testcase is asked to run."""


class RunTestsParser(object):
    """Command line parser for run_tests."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        # avoid a mutable default argument; None means "parse no args"
        return vars(self.parser.parse_args(argv if argv is not None else []))


class Runner(object):
    """Instantiate and run the testcases or tiers, then summarize."""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
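
    # Patch config_functest.yaml for the current deployment: the generic
    # scenario patches first, then the aarch64 specifics when running on
    # that architecture, then the results DB URL override when
    # TEST_DB_URL is set in the environment.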
    @staticmethod
    def update_config_file():
        Runner.patch_file(CONFIG_PATCH_PATH)

        if pod_arch and pod_arch in arch_filter:
            Runner.patch_file(CONFIG_AARCH64_PATCH_PATH)

        if "TEST_DB_URL" in os.environ:
            Runner.update_db_url()
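
    # The patch files map scenario substrings to config overrides which
    # get merged into config_functest.yaml, e.g. (hypothetical values):
    #
    #   fdio:
    #       general:
    #           openstack:
    #               image_name: cirros-custom
    #
    # Every key contained in DEPLOY_SCENARIO triggers the merge below.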
    @staticmethod
    def patch_file(patch_file_path):
        logger.debug('Updating file: %s', patch_file_path)
        with open(patch_file_path) as f:
            patch_file = yaml.safe_load(f)

        updated = False
        new_functest_yaml = None
        for key in patch_file:
            if key in CONST.__getattribute__('DEPLOY_SCENARIO'):
                new_functest_yaml = dict(ft_utils.merge_dicts(
                    ft_utils.get_functest_yaml(), patch_file[key]))
                updated = True

        if updated:
            # rewrite the config file with the merged content
            os.remove(CONFIG_FUNCTEST_PATH)
            with open(CONFIG_FUNCTEST_PATH, "w") as f:
                f.write(yaml.dump(new_functest_yaml, default_style='"'))
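
    # Point the results collector at the database given by TEST_DB_URL.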
    @staticmethod
    def update_db_url():
        with open(CONFIG_FUNCTEST_PATH) as f:
            functest_yaml = yaml.safe_load(f)

        with open(CONFIG_FUNCTEST_PATH, "w") as f:
            functest_yaml["results"]["test_db_url"] = os.environ.get(
                'TEST_DB_URL')
            f.write(yaml.dump(functest_yaml, default_style='"'))
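
    # Load the OpenStack credentials from the RC file and mirror the
    # relevant OS_* variables into CONST for later use.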
    @staticmethod
    def source_rc_file():
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        for key in ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME',
                    'OS_PASSWORD', 'OS_PROJECT_DOMAIN_NAME'):
            if key in os.environ:
                CONST.__setattr__(key, os.environ[key])
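
    # Look up the 'run' block of a testcase in testcases.yaml. It tells
    # the runner which module and class to instantiate, e.g.
    # (hypothetical values):
    #
    #   run:
    #       module: 'functest.opnfv_tests.openstack.vping.vping_ssh'
    #       class: 'VPingSSH'
    #       args: {...}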
    @staticmethod
    def get_run_dict(testname):
        try:
            run_dict = ft_utils.get_dict_by_test(testname)
            if not run_dict:
                logger.error("Cannot get {}'s config options".format(
                    testname))
            elif 'run' in run_dict:
                return run_dict['run']
            return None
        except Exception:  # pylint: disable=broad-except
            logger.exception("Cannot get {}'s config options".format(
                testname))
            return None
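
    # Run one testcase end to end: import its class, snapshot the
    # OpenStack resources when cleanup is enabled, execute it, optionally
    # push the result to the DB and clean up afterwards.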
    def run_test(self, test):
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                # 'args' is optional in the run block
                kwargs = run_dict.get('args', {})
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    # non-functest projects only need to complete the run
                    result = testcase.TestCase.EX_OK
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception("Cannot import the class for the test case.")
        return result
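
    # Run every testcase of a tier in order; a failing blocking test
    # aborts the whole run via BlockingTestFailed.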
    def run_tier(self, tier):
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if tests is None or len(tests) == 0:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            logger.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    logger.error("The test case '%s' failed.",
                                 test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result
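
    # Run all tiers whose ci_loop matches the current CI_LOOP, printing
    # the execution plan first.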
    def run_all(self):
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                tests_names = ' '.join(
                    str(x.get_name()) for x in tier.get_tests())
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(tests_names, width=40)])
        logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)
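
    # Dispatch depending on the -t argument: a tier name, a testcase
    # name, "all" or nothing at all, then print the summary and return
    # the overall result.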
    def main(self, **kwargs):
        Runner.update_config_file()

        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if kwargs.get('test'):
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # a blocking testcase failed: stop here and report
            pass
        except Exception:  # pylint: disable=broad-except
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self._tiers.get_test(kwargs.get('test')):
            self.summary(self._tiers.get_tier(kwargs.get('test')))
        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
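
    # Pretty-print the deployment description and a per-testcase report;
    # testcases which never ran are reported as SKIP.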
    def summary(self, tier=None):
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for tier in tiers:
            for test in tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # the testcase never ran (e.g. a blocking test failed
                    # earlier): report it as skipped
                    msg.add_row([test.get_name(), test.get_project(),
                                 tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if (test_case.is_successful()
                                        == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             tier.get_name(), "00:00", "SKIP"])
        logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
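

# Console-script entry point: configure logging, parse the CLI and
# convert the Runner's Result into a shell exit status.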
def main():
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value