# Author: Jose Lausuch (jose.lausuch@ericsson.com)
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys

import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here: when this module runs as a script it would
# be '__main__', so the logger is pinned to the package path instead.
logger = logging.getLogger('functest.ci.run_tests')
class Result(enum.Enum):
    """Overall outcome of a run, mapped to process exit semantics."""
    EX_OK = os.EX_OK    # every executed test passed
    EX_ERROR = -1       # at least one test failed or an error occurred
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails, aborting the tier."""
    pass
class TestNotEnabled(Exception):
    """Raised when a disabled test case is requested to run."""
    pass
class RunTestsParser(object):
    """Command-line argument parser for the functest runner."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse *argv* and return the options as a plain dict.

        The default was a mutable ``[]``; ``None`` avoids the shared-default
        pitfall while keeping the old no-argument behavior (parse nothing,
        rather than argparse's implicit sys.argv fallback).
        """
        return vars(self.parser.parse_args([] if argv is None else argv))
class Runner(object):
    """Drives execution of functest test cases and tiers."""

    def __init__(self):
        """Start from a clean slate: nothing executed, overall result OK."""
        self.executed_test_cases = []        # test case objects already run
        self.overall_result = Result.EX_OK   # flips to EX_ERROR on any failure
        self.clean_flag = True               # snapshot/restore around each test
        self.report_flag = False             # push results to DB when True
76 def print_separator(str, count=45):
78 for i in range(0, count - 1):
80 logger.info("%s" % line)
84 rc_file = CONST.__getattribute__('openstack_creds')
85 if not os.path.isfile(rc_file):
86 raise Exception("RC file %s does not exist..." % rc_file)
87 logger.debug("Sourcing the OpenStack RC file...")
88 os_utils.source_credentials(rc_file)
89 for key, value in os.environ.iteritems():
90 if re.search("OS_", key):
91 if key == 'OS_AUTH_URL':
92 CONST.__setattr__('OS_AUTH_URL', value)
93 elif key == 'OS_USERNAME':
94 CONST.__setattr__('OS_USERNAME', value)
95 elif key == 'OS_TENANT_NAME':
96 CONST.__setattr__('OS_TENANT_NAME', value)
97 elif key == 'OS_PASSWORD':
98 CONST.__setattr__('OS_PASSWORD', value)
101 def generate_os_snapshot():
109 def get_run_dict(testname):
111 dict = ft_utils.get_dict_by_test(testname)
113 logger.error("Cannot get {}'s config options".format(testname))
118 logger.exception("Cannot get {}'s config options".format(testname))
121 def run_test(self, test, tier_name, testcases=None):
122 if not test.is_enabled():
123 raise TestNotEnabled(
124 "The test case {} is not enabled".format(test.get_name()))
125 logger.info("\n") # blank line
126 self.print_separator("=")
127 logger.info("Running test case '%s'..." % test.get_name())
128 self.print_separator("=")
129 logger.debug("\n%s" % test)
130 self.source_rc_file()
132 if test.needs_clean() and self.clean_flag:
133 self.generate_os_snapshot()
135 flags = " -t %s" % test.get_name()
139 result = testcase.TestCase.EX_RUN_ERROR
140 run_dict = self.get_run_dict(test.get_name())
143 module = importlib.import_module(run_dict['module'])
144 cls = getattr(module, run_dict['class'])
145 test_dict = ft_utils.get_dict_by_test(test.get_name())
146 test_case = cls(**test_dict)
147 self.executed_test_cases.append(test_case)
149 kwargs = run_dict['args']
150 result = test_case.run(**kwargs)
152 result = test_case.run()
153 if result == testcase.TestCase.EX_OK:
155 test_case.push_to_db()
156 result = test_case.is_successful()
157 logger.info("Test result:\n\n%s\n", test_case)
159 logger.exception("Cannot import module {}".format(
161 except AttributeError:
162 logger.exception("Cannot get class {}".format(
165 raise Exception("Cannot import the class for the test case.")
167 if test.needs_clean() and self.clean_flag:
169 if result != testcase.TestCase.EX_OK:
170 logger.error("The test case '%s' failed. " % test.get_name())
171 self.overall_result = Result.EX_ERROR
172 if test.is_blocking():
173 raise BlockingTestFailed(
174 "The test case {} failed and is blocking".format(
177 def run_tier(self, tier):
178 tier_name = tier.get_name()
179 tests = tier.get_tests()
180 if tests is None or len(tests) == 0:
181 logger.info("There are no supported test cases in this tier "
182 "for the given scenario")
184 logger.info("\n\n") # blank line
185 self.print_separator("#")
186 logger.info("Running tier '%s'" % tier_name)
187 self.print_separator("#")
188 logger.debug("\n%s" % tier)
190 self.run_test(test, tier_name)
192 def run_all(self, tiers):
196 for tier in tiers.get_tiers():
197 if (len(tier.get_tests()) != 0 and
198 re.search(CONST.__getattribute__('CI_LOOP'),
199 tier.get_ci_loop()) is not None):
200 tiers_to_run.append(tier)
201 summary += ("\n - %s:\n\t %s"
203 tier.get_test_names()))
205 logger.info("Tests to be executed:%s" % summary)
206 for tier in tiers_to_run:
209 def main(self, **kwargs):
210 _tiers = tb.TierBuilder(
211 CONST.__getattribute__('INSTALLER_TYPE'),
212 CONST.__getattribute__('DEPLOY_SCENARIO'),
213 CONST.__getattribute__("functest_testcases_yaml"))
215 if kwargs['noclean']:
216 self.clean_flag = False
219 self.report_flag = True
223 self.source_rc_file()
224 logger.error(kwargs['test'])
225 if _tiers.get_tier(kwargs['test']):
226 self.run_tier(_tiers.get_tier(kwargs['test']))
227 elif _tiers.get_test(kwargs['test']):
228 self.run_test(_tiers.get_test(kwargs['test']),
229 _tiers.get_tier_name(kwargs['test']),
231 elif kwargs['test'] == "all":
234 logger.error("Unknown test case or tier '%s', "
235 "or not supported by "
236 "the given scenario '%s'."
238 CONST.__getattribute__('DEPLOY_SCENARIO')))
239 logger.debug("Available tiers are:\n\n%s",
241 return Result.EX_ERROR
244 except BlockingTestFailed:
247 logger.exception("Failures when running testcase(s)")
248 self.overall_result = Result.EX_ERROR
250 msg = prettytable.PrettyTable(
251 header_style='upper', padding_width=5,
252 field_names=['env var', 'value'])
253 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
255 msg.add_row([env_var, CONST.__getattribute__(env_var)])
256 logger.info("Deployment description: \n\n%s\n", msg)
258 msg = prettytable.PrettyTable(
259 header_style='upper', padding_width=5,
260 field_names=['test case', 'project', 'tier', 'duration', 'result'])
261 for test_case in self.executed_test_cases:
262 result = 'PASS' if(test_case.is_successful(
263 ) == test_case.EX_OK) else 'FAIL'
264 msg.add_row([test_case.case_name, test_case.project_name,
265 _tiers.get_tier_name(test_case.case_name),
266 test_case.get_duration(), result])
267 logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
269 logger.info("Execution exit value: %s" % self.overall_result)
270 return self.overall_result
if __name__ == '__main__':
    # Configure logging from the functest logging config file, then run.
    logging.config.fileConfig(
        CONST.__getattribute__('dir_functest_logging_cfg'))
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    # main() returns a Result enum; its .value is the process exit code.
    sys.exit(runner.main(**args).value)