3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the config file packaged with functest.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
    """The overall result in enumerated type.

    The members are referenced all over this module (e.g. main() returns
    ``Result.EX_ERROR`` on unknown testcases) and the script exit status
    is taken from ``.value``.
    """
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK  # every selected testcase succeeded
    EX_ERROR = -1     # at least one testcase failed or could not run
class BlockingTestFailed(Exception):
    """Raised when a testcase flagged as blocking fails."""
class TestNotEnabled(Exception):
    """Raised when a disabled testcase is requested to run."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        # Boolean flags: main() reads 'noclean' and 'report' as booleans,
        # so both need action="store_true" (default False).
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline as a dict
        """
        return vars(self.parser.parse_args(argv))
92 self.executed_test_cases = {}
93 self.overall_result = Result.EX_OK
94 self.clean_flag = True
95 self.report_flag = False
96 self._tiers = tb.TierBuilder(
97 CONST.__getattribute__('INSTALLER_TYPE'),
98 CONST.__getattribute__('DEPLOY_SCENARIO'),
99 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
102 def source_rc_file():
103 """Set the environmental vars from openstack.creds"""
105 rc_file = CONST.__getattribute__('openstack_creds')
106 if not os.path.isfile(rc_file):
107 raise Exception("RC file %s does not exist..." % rc_file)
108 LOGGER.debug("Sourcing the OpenStack RC file...")
109 os_utils.source_credentials(rc_file)
112 def get_run_dict(testname):
113 """Obtain the the 'run' block of the testcase from testcases.yaml"""
115 dic_testcase = ft_utils.get_dict_by_test(testname)
117 LOGGER.error("Cannot get %s's config options", testname)
118 elif 'run' in dic_testcase:
119 return dic_testcase['run']
121 except Exception: # pylint: disable=broad-except
122 LOGGER.exception("Cannot get %s's config options", testname)
125 def run_test(self, test):
126 """Run one test case"""
127 if not test.is_enabled():
128 raise TestNotEnabled(
129 "The test case {} is not enabled".format(test.get_name()))
130 LOGGER.info("Running test case '%s'...", test.get_name())
131 result = testcase.TestCase.EX_RUN_ERROR
132 run_dict = self.get_run_dict(test.get_name())
135 module = importlib.import_module(run_dict['module'])
136 cls = getattr(module, run_dict['class'])
137 test_dict = ft_utils.get_dict_by_test(test.get_name())
138 test_case = cls(**test_dict)
139 self.executed_test_cases[test.get_name()] = test_case
141 if test_case.create_snapshot() != test_case.EX_OK:
142 return testcase.TestCase.EX_RUN_ERROR
144 kwargs = run_dict['args']
145 test_case.run(**kwargs)
149 test_case.push_to_db()
150 if test.get_project() == "functest":
151 result = test_case.is_successful()
153 result = testcase.TestCase.EX_OK
154 LOGGER.info("Test result:\n\n%s\n", test_case)
158 LOGGER.exception("Cannot import module %s", run_dict['module'])
159 except AttributeError:
160 LOGGER.exception("Cannot get class %s", run_dict['class'])
162 raise Exception("Cannot import the class for the test case.")
165 def run_tier(self, tier):
167 tier_name = tier.get_name()
168 tests = tier.get_tests()
170 LOGGER.info("There are no supported test cases in this tier "
171 "for the given scenario")
172 self.overall_result = Result.EX_ERROR
174 LOGGER.info("Running tier '%s'", tier_name)
177 test_case = self.executed_test_cases[test.get_name()]
178 if test_case.is_successful() != testcase.TestCase.EX_OK:
179 LOGGER.error("The test case '%s' failed.", test.get_name())
180 if test.get_project() == "functest":
181 self.overall_result = Result.EX_ERROR
182 if test.is_blocking():
183 raise BlockingTestFailed(
184 "The test case {} failed and is blocking".format(
186 return self.overall_result
189 """Run all available testcases"""
191 msg = prettytable.PrettyTable(
192 header_style='upper', padding_width=5,
193 field_names=['tiers', 'order', 'CI Loop', 'description',
195 for tier in self._tiers.get_tiers():
196 if (tier.get_tests() and
197 re.search(CONST.__getattribute__('CI_LOOP'),
198 tier.get_ci_loop()) is not None):
199 tiers_to_run.append(tier)
200 msg.add_row([tier.get_name(), tier.get_order(),
202 textwrap.fill(tier.description, width=40),
203 textwrap.fill(' '.join([str(x.get_name(
204 )) for x in tier.get_tests()]), width=40)])
205 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
206 for tier in tiers_to_run:
209 def main(self, **kwargs):
210 """Entry point of class Runner"""
211 if 'noclean' in kwargs:
212 self.clean_flag = not kwargs['noclean']
213 if 'report' in kwargs:
214 self.report_flag = kwargs['report']
217 self.source_rc_file()
218 LOGGER.debug("Test args: %s", kwargs['test'])
219 if self._tiers.get_tier(kwargs['test']):
220 self.run_tier(self._tiers.get_tier(kwargs['test']))
221 elif self._tiers.get_test(kwargs['test']):
222 result = self.run_test(
223 self._tiers.get_test(kwargs['test']))
224 if result != testcase.TestCase.EX_OK:
225 LOGGER.error("The test case '%s' failed.",
227 self.overall_result = Result.EX_ERROR
228 elif kwargs['test'] == "all":
231 LOGGER.error("Unknown test case or tier '%s', or not "
232 "supported by the given scenario '%s'.",
234 CONST.__getattribute__('DEPLOY_SCENARIO'))
235 LOGGER.debug("Available tiers are:\n\n%s",
237 return Result.EX_ERROR
240 except BlockingTestFailed:
242 except Exception: # pylint: disable=broad-except
243 LOGGER.exception("Failures when running testcase(s)")
244 self.overall_result = Result.EX_ERROR
245 if not self._tiers.get_test(kwargs['test']):
246 self.summary(self._tiers.get_tier(kwargs['test']))
247 LOGGER.info("Execution exit value: %s", self.overall_result)
248 return self.overall_result
250 def summary(self, tier=None):
251 """To generate functest report showing the overall results"""
252 msg = prettytable.PrettyTable(
253 header_style='upper', padding_width=5,
254 field_names=['env var', 'value'])
255 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
257 msg.add_row([env_var, CONST.__getattribute__(env_var)])
258 LOGGER.info("Deployment description:\n\n%s\n", msg)
259 msg = prettytable.PrettyTable(
260 header_style='upper', padding_width=5,
261 field_names=['test case', 'project', 'tier',
262 'duration', 'result'])
263 tiers = [tier] if tier else self._tiers.get_tiers()
264 for each_tier in tiers:
265 for test in each_tier.get_tests():
267 test_case = self.executed_test_cases[test.get_name()]
269 msg.add_row([test.get_name(), test.get_project(),
270 each_tier.get_name(), "00:00", "SKIP"])
272 result = 'PASS' if(test_case.is_successful(
273 ) == test_case.EX_OK) else 'FAIL'
275 [test_case.case_name, test_case.project_name,
276 self._tiers.get_tier_name(test_case.case_name),
277 test_case.get_duration(), result])
278 for test in each_tier.get_skipped_test():
279 msg.add_row([test.get_name(), test.get_project(),
280 each_tier.get_name(), "00:00", "SKIP"])
281 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Entry point: configure logging, parse argv and run the tests."""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    # Fix: 'runner' was used without ever being bound.
    runner = Runner()
    return runner.main(**args).value