# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

""" The entry of running tests:
1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
2) Execute the common operations on every testcase (run, push results to db...)
3) Return the right status code
"""
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here: the module is the entry point of the
# "run_tests" console script, so __name__ would be "__main__" instead of
# the expected "functest.ci.run_tests" logger name.
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the packaged functest configuration file.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    # NOTE(review): the two members below were missing from the garbled
    # source; restored from the values the rest of the file relies on
    # (Result.EX_OK / Result.EX_ERROR, and main() returning .value as a
    # process exit code).
    EX_OK = os.EX_OK
    EX_ERROR = -1
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails."""
class TestNotEnabled(Exception):
    """Raised when a requested test case is disabled in testcases.yaml."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        # NOTE(review): the "def __init__" line and the store_true action
        # lines were missing from the garbled source; restored so the
        # boolean flags actually behave as flags.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
92 self.executed_test_cases = {}
93 self.overall_result = Result.EX_OK
94 self.clean_flag = True
95 self.report_flag = False
96 self.tiers = tb.TierBuilder(
97 CONST.__getattribute__('INSTALLER_TYPE'),
98 CONST.__getattribute__('DEPLOY_SCENARIO'),
99 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
102 def get_run_dict(testname):
103 """Obtain the 'run' block of the testcase from testcases.yaml"""
105 dic_testcase = ft_utils.get_dict_by_test(testname)
107 LOGGER.error("Cannot get %s's config options", testname)
108 elif 'run' in dic_testcase:
109 return dic_testcase['run']
111 except Exception: # pylint: disable=broad-except
112 LOGGER.exception("Cannot get %s's config options", testname)
115 def run_test(self, test):
116 """Run one test case"""
117 if not test.is_enabled():
118 raise TestNotEnabled(
119 "The test case {} is not enabled".format(test.get_name()))
120 LOGGER.info("Running test case '%s'...", test.get_name())
121 result = testcase.TestCase.EX_RUN_ERROR
122 run_dict = self.get_run_dict(test.get_name())
125 module = importlib.import_module(run_dict['module'])
126 cls = getattr(module, run_dict['class'])
127 test_dict = ft_utils.get_dict_by_test(test.get_name())
128 test_case = cls(**test_dict)
129 self.executed_test_cases[test.get_name()] = test_case
131 kwargs = run_dict['args']
132 test_case.run(**kwargs)
136 test_case.push_to_db()
137 if test.get_project() == "functest":
138 result = test_case.is_successful()
140 result = testcase.TestCase.EX_OK
141 LOGGER.info("Test result:\n\n%s\n", test_case)
145 LOGGER.exception("Cannot import module %s", run_dict['module'])
146 except AttributeError:
147 LOGGER.exception("Cannot get class %s", run_dict['class'])
149 raise Exception("Cannot import the class for the test case.")
152 def run_tier(self, tier):
154 tier_name = tier.get_name()
155 tests = tier.get_tests()
157 LOGGER.info("There are no supported test cases in this tier "
158 "for the given scenario")
159 self.overall_result = Result.EX_ERROR
161 LOGGER.info("Running tier '%s'", tier_name)
164 test_case = self.executed_test_cases[test.get_name()]
165 if test_case.is_successful() != testcase.TestCase.EX_OK:
166 LOGGER.error("The test case '%s' failed.", test.get_name())
167 if test.get_project() == "functest":
168 self.overall_result = Result.EX_ERROR
169 if test.is_blocking():
170 raise BlockingTestFailed(
171 "The test case {} failed and is blocking".format(
173 return self.overall_result
176 """Run all available testcases"""
178 msg = prettytable.PrettyTable(
179 header_style='upper', padding_width=5,
180 field_names=['tiers', 'order', 'CI Loop', 'description',
182 for tier in self.tiers.get_tiers():
183 if (tier.get_tests() and
184 re.search(CONST.__getattribute__('CI_LOOP'),
185 tier.get_ci_loop()) is not None):
186 tiers_to_run.append(tier)
187 msg.add_row([tier.get_name(), tier.get_order(),
189 textwrap.fill(tier.description, width=40),
190 textwrap.fill(' '.join([str(x.get_name(
191 )) for x in tier.get_tests()]), width=40)])
192 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
193 for tier in tiers_to_run:
196 def main(self, **kwargs):
197 """Entry point of class Runner"""
198 if 'noclean' in kwargs:
199 self.clean_flag = not kwargs['noclean']
200 if 'report' in kwargs:
201 self.report_flag = kwargs['report']
204 LOGGER.debug("Sourcing the credential file...")
205 os_utils.source_credentials(CONST.__getattribute__('env_file'))
207 LOGGER.debug("Test args: %s", kwargs['test'])
208 if self.tiers.get_tier(kwargs['test']):
209 self.run_tier(self.tiers.get_tier(kwargs['test']))
210 elif self.tiers.get_test(kwargs['test']):
211 result = self.run_test(
212 self.tiers.get_test(kwargs['test']))
213 if result != testcase.TestCase.EX_OK:
214 LOGGER.error("The test case '%s' failed.",
216 self.overall_result = Result.EX_ERROR
217 elif kwargs['test'] == "all":
220 LOGGER.error("Unknown test case or tier '%s', or not "
221 "supported by the given scenario '%s'.",
223 CONST.__getattribute__('DEPLOY_SCENARIO'))
224 LOGGER.debug("Available tiers are:\n\n%s",
226 return Result.EX_ERROR
229 except BlockingTestFailed:
231 except Exception: # pylint: disable=broad-except
232 LOGGER.exception("Failures when running testcase(s)")
233 self.overall_result = Result.EX_ERROR
234 if not self.tiers.get_test(kwargs['test']):
235 self.summary(self.tiers.get_tier(kwargs['test']))
236 LOGGER.info("Execution exit value: %s", self.overall_result)
237 return self.overall_result
239 def summary(self, tier=None):
240 """To generate functest report showing the overall results"""
241 msg = prettytable.PrettyTable(
242 header_style='upper', padding_width=5,
243 field_names=['env var', 'value'])
244 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
246 msg.add_row([env_var, CONST.__getattribute__(env_var)])
247 LOGGER.info("Deployment description:\n\n%s\n", msg)
248 msg = prettytable.PrettyTable(
249 header_style='upper', padding_width=5,
250 field_names=['test case', 'project', 'tier',
251 'duration', 'result'])
252 tiers = [tier] if tier else self.tiers.get_tiers()
253 for each_tier in tiers:
254 for test in each_tier.get_tests():
256 test_case = self.executed_test_cases[test.get_name()]
258 msg.add_row([test.get_name(), test.get_project(),
259 each_tier.get_name(), "00:00", "SKIP"])
261 result = 'PASS' if(test_case.is_successful(
262 ) == test_case.EX_OK) else 'FAIL'
264 [test_case.case_name, test_case.project_name,
265 self.tiers.get_tier_name(test_case.case_name),
266 test_case.get_duration(), result])
267 for test in each_tier.get_skipped_test():
268 msg.add_row([test.get_name(), test.get_project(),
269 each_tier.get_name(), "00:00", "SKIP"])
270 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Entry point of the run_tests console script.

    Configures logging from the packaged logging.ini, parses the
    command line and returns the numeric exit value of the run.
    """
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    # Result is an enum; .value is the process exit code.
    return runner.main(**args).value