3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
29 import functest.ci.tier_builder as tb
30 import functest.core.testcase as testcase
31 import functest.utils.functest_utils as ft_utils
32 import functest.utils.openstack_utils as os_utils
33 from functest.utils.constants import CONST
# __name__ cannot be used here
# Module-level logger; the explicit name keeps log records grouped under
# the 'functest.ci.run_tests' hierarchy configured in logging.ini.
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the packaged config_functest.yaml resource.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    # NOTE(review): the enum members (EX_OK and EX_ERROR, used by Runner and
    # converted to a process exit code via ``.value`` in main()) are elided
    # in this excerpt.
class BlockingTestFailed(Exception):
    """Raised when a test case flagged as blocking does not pass."""
class TestNotEnabled(Exception):
    """Raised when a disabled test case is asked to run."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    # NOTE(review): the ``def __init__(self):`` header and several keyword
    # arguments of the add_argument() calls (action/default continuations)
    # are elided in this excerpt.
        self.parser = argparse.ArgumentParser()
        # -t/--test selects a single test case or a whole tier by name.
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
        # -n/--noclean keeps the OpenStack resources created by the tests.
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
        # -r/--report pushes every result to the test database.
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",

    def parse_args(self, argv=None):
        """Parse the command-line arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
        # NOTE(review): the ``class Runner`` header, its docstring and the
        # ``def __init__(self):`` line are elided in this excerpt; the
        # statements below are the constructor body.
        # Map of test-case name -> executed TestCase object (see run_test()).
        self.executed_test_cases = {}
        # Overall exit status; downgraded to Result.EX_ERROR on any failure.
        self.overall_result = Result.EX_OK
        # Clean OpenStack resources after each test unless --noclean is set.
        self.clean_flag = True
        # Push results to the database only when --report is set.
        self.report_flag = False
        # Tier/test catalogue built from testcases.yaml and filtered by the
        # installer type and deploy scenario of the current deployment.
        self.tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
102 def source_rc_file():
103 """Set the environmental vars from openstack.creds"""
105 rc_file = CONST.__getattribute__('openstack_creds')
106 if not os.path.isfile(rc_file):
107 raise Exception("RC file %s does not exist..." % rc_file)
108 LOGGER.debug("Sourcing the OpenStack RC file...")
109 os_utils.source_credentials(rc_file)
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml

        Args:
            testname: name of the test case as declared in testcases.yaml.

        Returns:
            the 'run' dictionary of the test case (module/class/args),
            or None when it cannot be retrieved.
        """
        # NOTE(review): the ``try:`` header, the ``if not dic_testcase:``
        # guard and the ``return None`` fallbacks are elided in this excerpt.
            dic_testcase = ft_utils.get_dict_by_test(testname)
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
        except Exception:  # pylint: disable=broad-except
            # Defensive catch-all: a malformed testcases.yaml entry must not
            # crash the runner; log it and fall through.
            LOGGER.exception("Cannot get %s's config options", testname)
    def run_test(self, test):
        """Run one test case

        Args:
            test: the test-case description object built from testcases.yaml.

        Raises:
            TestNotEnabled: when the test case is disabled in testcases.yaml.
            Exception: when no 'run' block is available for the test case.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        # Pessimistic default; overwritten only when the run completes.
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        # NOTE(review): the ``if run_dict:`` / ``try:`` headers and the
        # branches between the statements below (args handling, report flag,
        # else clauses) are elided in this excerpt.
            # Instantiate the class declared in the 'run' block of the test.
            module = importlib.import_module(run_dict['module'])
            cls = getattr(module, run_dict['class'])
            test_dict = ft_utils.get_dict_by_test(test.get_name())
            test_case = cls(**test_dict)
            # Keep a reference for the final summary table.
            self.executed_test_cases[test.get_name()] = test_case
            kwargs = run_dict['args']
            test_case.run(**kwargs)
            # presumably guarded by self.report_flag in the elided lines —
            # TODO confirm against the full source.
            test_case.push_to_db()
            # Only functest's own test cases drive the returned status;
            # third-party project cases are reported but treated as EX_OK.
            if test.get_project() == "functest":
                result = test_case.is_successful()
            result = testcase.TestCase.EX_OK
            LOGGER.info("Test result:\n\n%s\n", test_case)
            LOGGER.exception("Cannot import module %s", run_dict['module'])
        except AttributeError:
            LOGGER.exception("Cannot get class %s", run_dict['class'])
            # No 'run' block: the test case cannot be mapped to a class.
            raise Exception("Cannot import the class for the test case.")
    def run_tier(self, tier):
        """Run one tier (a named group of test cases).

        Returns:
            self.overall_result (a Result enum member).

        Raises:
            BlockingTestFailed: when a failed test case is flagged blocking.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        # NOTE(review): the ``if not tests:`` guard and the loop that calls
        # run_test() over ``tests`` are elided in this excerpt.
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
            LOGGER.info("Running tier '%s'", tier_name)
            test_case = self.executed_test_cases[test.get_name()]
            if test_case.is_successful() != testcase.TestCase.EX_OK:
                LOGGER.error("The test case '%s' failed.", test.get_name())
                # Only functest-owned cases degrade the overall result.
                if test.get_project() == "functest":
                    self.overall_result = Result.EX_ERROR
                # A blocking failure aborts the whole run immediately.
                if test.is_blocking():
                    raise BlockingTestFailed(
                        "The test case {} failed and is blocking".format(
        return self.overall_result
        # NOTE(review): the ``def run_all(self):`` header, the
        # ``tiers_to_run = []`` initialisation, one field name of the table,
        # and the ``self.run_tier(tier)`` call of the final loop are elided
        # in this excerpt.
        """Run all available testcases"""
        # Table announcing what will be executed before anything runs.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
        # Keep only tiers that have tests and whose ci_loop pattern matches
        # the current CI_LOOP (e.g. daily/weekly) setting.
        for tier in self.tiers.get_tiers():
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                             )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
    def main(self, **kwargs):
        """Entry point of class Runner

        Expected kwargs: 'test' (mandatory selector: a tier name, a test
        name, or "all"), plus optional booleans 'noclean' and 'report'.

        Returns:
            a Result enum member reflecting the overall outcome.
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        # NOTE(review): the enclosing ``try:`` header and parts of the
        # dispatch below (e.g. the run_all() call for "all", the else
        # branch header, some argument continuations) are elided in this
        # excerpt.
            # Credentials must be exported before any test touches OpenStack.
            self.source_rc_file()
            LOGGER.debug("Test args: %s", kwargs['test'])
            # Dispatch on the selector: tier name, single test name, "all".
            if self.tiers.get_tier(kwargs['test']):
                self.run_tier(self.tiers.get_tier(kwargs['test']))
            elif self.tiers.get_test(kwargs['test']):
                result = self.run_test(
                    self.tiers.get_test(kwargs['test']))
                if result != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.",
                    self.overall_result = Result.EX_ERROR
            elif kwargs['test'] == "all":
                LOGGER.error("Unknown test case or tier '%s', or not "
                             "supported by the given scenario '%s'.",
                             CONST.__getattribute__('DEPLOY_SCENARIO'))
                LOGGER.debug("Available tiers are:\n\n%s",
                return Result.EX_ERROR
        except BlockingTestFailed:
        except Exception:  # pylint: disable=broad-except
            # Catch-all boundary: log the failure and report EX_ERROR
            # instead of crashing the CI job.
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # Print the summary table except when a single test case was run.
        if not self.tiers.get_test(kwargs['test']):
            self.summary(self.tiers.get_tier(kwargs['test']))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
    def summary(self, tier=None):
        """To generate functest report showing the overall results

        Args:
            tier: when given, report only that tier; otherwise report every
                tier known to the TierBuilder.
        """
        # First table: description of the deployment under test.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        # NOTE(review): the continuation of this env-var list is elided in
        # this excerpt.
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        # Second table: one row per test case with duration and PASS/FAIL.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                # NOTE(review): the try/except KeyError around this lookup
                # (never-executed cases are reported as SKIP) and the
                # ``msg.add_row(`` call header are elided in this excerpt.
                test_case = self.executed_test_cases[test.get_name()]
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
                result = 'PASS' if(test_case.is_successful(
                ) == test_case.EX_OK) else 'FAIL'
                    [test_case.case_name, test_case.project_name,
                     self.tiers.get_tier_name(test_case.case_name),
                     test_case.get_duration(), result])
            # Tests skipped for this scenario are listed explicitly.
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
    # NOTE(review): the ``def main():`` header and the ``runner = Runner()``
    # construction are elided in this excerpt; the statements below are the
    # body of the module-level entry point.
    # Configure logging from the packaged ini and route warnings through it.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    # ``.value`` maps the Result enum member to a shell exit code.
    return runner.main(**args).value