# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
""" The entry of running tests:
1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
2) Execute the common operations on every testcase (run, push results to db...)
3) Return the right status code
"""
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable
import six

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here: the module may be run as a console script,
# so the logger name is pinned to the canonical module path instead.
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the packaged functest configuration file.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
    """The overall result in enumerated type.

    ``main()`` returns ``.value`` as the process exit code, so EX_OK must
    map to the conventional success status (0 on POSIX).
    """
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1
class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
93 self.executed_test_cases = {}
94 self.overall_result = Result.EX_OK
95 self.clean_flag = True
96 self.report_flag = False
97 self._tiers = tb.TierBuilder(
98 CONST.__getattribute__('INSTALLER_TYPE'),
99 CONST.__getattribute__('DEPLOY_SCENARIO'),
100 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
103 def source_rc_file():
104 """Set the environmental vars from openstack.creds"""
106 rc_file = CONST.__getattribute__('openstack_creds')
107 if not os.path.isfile(rc_file):
108 raise Exception("RC file %s does not exist..." % rc_file)
109 LOGGER.debug("Sourcing the OpenStack RC file...")
110 os_utils.source_credentials(rc_file)
111 for key, value in six.iteritems(os.environ):
112 if re.search("OS_", key):
113 if key == 'OS_AUTH_URL':
114 CONST.__setattr__('OS_AUTH_URL', value)
115 elif key == 'OS_USERNAME':
116 CONST.__setattr__('OS_USERNAME', value)
117 elif key == 'OS_TENANT_NAME':
118 CONST.__setattr__('OS_TENANT_NAME', value)
119 elif key == 'OS_PASSWORD':
120 CONST.__setattr__('OS_PASSWORD', value)
121 elif key == "OS_PROJECT_DOMAIN_NAME":
122 CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)
125 def get_run_dict(testname):
126 """Obtain the the 'run' block of the testcase from testcases.yaml"""
128 dic_testcase = ft_utils.get_dict_by_test(testname)
130 LOGGER.error("Cannot get %s's config options", testname)
131 elif 'run' in dic_testcase:
132 return dic_testcase['run']
134 except Exception: # pylint: disable=broad-except
135 LOGGER.exception("Cannot get %s's config options", testname)
138 def run_test(self, test):
139 """Run one test case"""
140 if not test.is_enabled():
141 raise TestNotEnabled(
142 "The test case {} is not enabled".format(test.get_name()))
143 LOGGER.info("Running test case '%s'...", test.get_name())
144 result = testcase.TestCase.EX_RUN_ERROR
145 run_dict = self.get_run_dict(test.get_name())
148 module = importlib.import_module(run_dict['module'])
149 cls = getattr(module, run_dict['class'])
150 test_dict = ft_utils.get_dict_by_test(test.get_name())
151 test_case = cls(**test_dict)
152 self.executed_test_cases[test.get_name()] = test_case
154 if test_case.create_snapshot() != test_case.EX_OK:
155 return testcase.TestCase.EX_RUN_ERROR
157 kwargs = run_dict['args']
158 test_case.run(**kwargs)
162 test_case.push_to_db()
163 if test.get_project() == "functest":
164 result = test_case.is_successful()
166 result = testcase.TestCase.EX_OK
167 LOGGER.info("Test result:\n\n%s\n", test_case)
171 LOGGER.exception("Cannot import module %s", run_dict['module'])
172 except AttributeError:
173 LOGGER.exception("Cannot get class %s", run_dict['class'])
175 raise Exception("Cannot import the class for the test case.")
178 def run_tier(self, tier):
180 tier_name = tier.get_name()
181 tests = tier.get_tests()
183 LOGGER.info("There are no supported test cases in this tier "
184 "for the given scenario")
185 self.overall_result = Result.EX_ERROR
187 LOGGER.info("Running tier '%s'", tier_name)
190 test_case = self.executed_test_cases[test.get_name()]
191 if test_case.is_successful() != testcase.TestCase.EX_OK:
192 LOGGER.error("The test case '%s' failed.", test.get_name())
193 if test.get_project() == "functest":
194 self.overall_result = Result.EX_ERROR
195 if test.is_blocking():
196 raise BlockingTestFailed(
197 "The test case {} failed and is blocking".format(
199 return self.overall_result
202 """Run all available testcases"""
204 msg = prettytable.PrettyTable(
205 header_style='upper', padding_width=5,
206 field_names=['tiers', 'order', 'CI Loop', 'description',
208 for tier in self._tiers.get_tiers():
209 if (tier.get_tests() and
210 re.search(CONST.__getattribute__('CI_LOOP'),
211 tier.get_ci_loop()) is not None):
212 tiers_to_run.append(tier)
213 msg.add_row([tier.get_name(), tier.get_order(),
215 textwrap.fill(tier.description, width=40),
216 textwrap.fill(' '.join([str(x.get_name(
217 )) for x in tier.get_tests()]), width=40)])
218 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
219 for tier in tiers_to_run:
222 def main(self, **kwargs):
223 """Entry point of class Runner"""
224 if 'noclean' in kwargs:
225 self.clean_flag = not kwargs['noclean']
226 if 'report' in kwargs:
227 self.report_flag = kwargs['report']
230 self.source_rc_file()
231 LOGGER.debug("Test args: %s", kwargs['test'])
232 if self._tiers.get_tier(kwargs['test']):
233 self.run_tier(self._tiers.get_tier(kwargs['test']))
234 elif self._tiers.get_test(kwargs['test']):
235 result = self.run_test(
236 self._tiers.get_test(kwargs['test']))
237 if result != testcase.TestCase.EX_OK:
238 LOGGER.error("The test case '%s' failed.",
240 self.overall_result = Result.EX_ERROR
241 elif kwargs['test'] == "all":
244 LOGGER.error("Unknown test case or tier '%s', or not "
245 "supported by the given scenario '%s'.",
247 CONST.__getattribute__('DEPLOY_SCENARIO'))
248 LOGGER.debug("Available tiers are:\n\n%s",
250 return Result.EX_ERROR
253 except BlockingTestFailed:
255 except Exception: # pylint: disable=broad-except
256 LOGGER.exception("Failures when running testcase(s)")
257 self.overall_result = Result.EX_ERROR
258 if not self._tiers.get_test(kwargs['test']):
259 self.summary(self._tiers.get_tier(kwargs['test']))
260 LOGGER.info("Execution exit value: %s", self.overall_result)
261 return self.overall_result
263 def summary(self, tier=None):
264 """To generate functest report showing the overall results"""
265 msg = prettytable.PrettyTable(
266 header_style='upper', padding_width=5,
267 field_names=['env var', 'value'])
268 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
270 msg.add_row([env_var, CONST.__getattribute__(env_var)])
271 LOGGER.info("Deployment description:\n\n%s\n", msg)
272 msg = prettytable.PrettyTable(
273 header_style='upper', padding_width=5,
274 field_names=['test case', 'project', 'tier',
275 'duration', 'result'])
276 tiers = [tier] if tier else self._tiers.get_tiers()
277 for each_tier in tiers:
278 for test in each_tier.get_tests():
280 test_case = self.executed_test_cases[test.get_name()]
282 msg.add_row([test.get_name(), test.get_project(),
283 each_tier.get_name(), "00:00", "SKIP"])
285 result = 'PASS' if(test_case.is_successful(
286 ) == test_case.EX_OK) else 'FAIL'
288 [test_case.case_name, test_case.project_name,
289 self._tiers.get_tier_name(test_case.case_name),
290 test_case.get_duration(), result])
291 for test in each_tier.get_skipped_test():
292 msg.add_row([test.get_name(), test.get_project(),
293 each_tier.get_name(), "00:00", "SKIP"])
294 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Entry point: configure logging, parse argv, run and return exit code."""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    # Result is an enum; .value is the numeric process exit status.
    return runner.main(**args).value