3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable
import yaml

from functest.ci import tier_builder
from functest.core import testcase
from functest.utils import env
34 LOGGER = logging.getLogger('functest.ci.run_tests')
35 ENV_FILE = "/home/opnfv/functest/conf/env_file"
class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    # NOTE(review): the enum members were missing from this copy although
    # Result.EX_OK / Result.EX_ERROR are used throughout the module and
    # main() returns `.value` as the process exit code.  Restored:
    # success maps to the OS success status, failure to a generic -1.
    EX_OK = os.EX_OK
    EX_ERROR = -1
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails, aborting the run."""
class TestNotEnabled(Exception):
    """Raised when a requested test case is disabled in testcases.yaml."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        # Restored: the __init__ header and the store_true actions for the
        # boolean flags were missing from this copy; without store_true the
        # -n/-r options would require an explicit value.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse the command-line arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            dict with the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
88 self.executed_test_cases = {}
89 self.overall_result = Result.EX_OK
90 self.clean_flag = True
91 self.report_flag = False
92 self.tiers = tier_builder.TierBuilder(
93 env.get('INSTALLER_TYPE'),
94 env.get('DEPLOY_SCENARIO'),
95 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
98 def source_envfile(rc_file=ENV_FILE):
99 """Source the env file passed as arg"""
100 if not os.path.isfile(rc_file):
101 LOGGER.debug("No env file %s found", rc_file)
103 with open(rc_file, "r") as rcfd:
104 LOGGER.info("Sourcing env file %s", rc_file)
106 var = (line.rstrip('"\n').replace('export ', '').split(
107 "=") if re.search(r'(.*)=(.*)', line) else None)
108 # The two next lines should be modified as soon as rc_file
109 # conforms with common rules. Be aware that it could induce
110 # issues if value starts with '
112 key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
113 value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
114 os.environ[key] = value
117 def get_dict_by_test(testname):
118 # pylint: disable=bad-continuation,missing-docstring
119 with open(pkg_resources.resource_filename(
120 'functest', 'ci/testcases.yaml')) as tyaml:
121 testcases_yaml = yaml.safe_load(tyaml)
122 for dic_tier in testcases_yaml.get("tiers"):
123 for dic_testcase in dic_tier['testcases']:
124 if dic_testcase['case_name'] == testname:
126 LOGGER.error('Project %s is not defined in testcases.yaml', testname)
130 def get_run_dict(testname):
131 """Obtain the 'run' block of the testcase from testcases.yaml"""
133 dic_testcase = Runner.get_dict_by_test(testname)
135 LOGGER.error("Cannot get %s's config options", testname)
136 elif 'run' in dic_testcase:
137 return dic_testcase['run']
139 except Exception: # pylint: disable=broad-except
140 LOGGER.exception("Cannot get %s's config options", testname)
143 def run_test(self, test):
144 """Run one test case"""
145 if not test.is_enabled():
146 raise TestNotEnabled(
147 "The test case {} is not enabled".format(test.get_name()))
148 LOGGER.info("Running test case '%s'...", test.get_name())
149 result = testcase.TestCase.EX_RUN_ERROR
150 run_dict = self.get_run_dict(test.get_name())
153 module = importlib.import_module(run_dict['module'])
154 cls = getattr(module, run_dict['class'])
155 test_dict = Runner.get_dict_by_test(test.get_name())
156 test_case = cls(**test_dict)
157 self.executed_test_cases[test.get_name()] = test_case
159 kwargs = run_dict['args']
160 test_case.run(**kwargs)
164 test_case.push_to_db()
165 if test.get_project() == "functest":
166 result = test_case.is_successful()
168 result = testcase.TestCase.EX_OK
169 LOGGER.info("Test result:\n\n%s\n", test_case)
173 LOGGER.exception("Cannot import module %s", run_dict['module'])
174 except AttributeError:
175 LOGGER.exception("Cannot get class %s", run_dict['class'])
177 raise Exception("Cannot import the class for the test case.")
180 def run_tier(self, tier):
182 tier_name = tier.get_name()
183 tests = tier.get_tests()
185 LOGGER.info("There are no supported test cases in this tier "
186 "for the given scenario")
187 self.overall_result = Result.EX_ERROR
189 LOGGER.info("Running tier '%s'", tier_name)
192 test_case = self.executed_test_cases[test.get_name()]
193 if test_case.is_successful() != testcase.TestCase.EX_OK:
194 LOGGER.error("The test case '%s' failed.", test.get_name())
195 if test.get_project() == "functest":
196 self.overall_result = Result.EX_ERROR
197 if test.is_blocking():
198 raise BlockingTestFailed(
199 "The test case {} failed and is blocking".format(
201 return self.overall_result
204 """Run all available testcases"""
206 msg = prettytable.PrettyTable(
207 header_style='upper', padding_width=5,
208 field_names=['tiers', 'order', 'CI Loop', 'description',
210 for tier in self.tiers.get_tiers():
211 ci_loop = env.get('CI_LOOP')
212 if (tier.get_tests() and
213 re.search(ci_loop, tier.get_ci_loop()) is not None):
214 tiers_to_run.append(tier)
215 msg.add_row([tier.get_name(), tier.get_order(),
217 textwrap.fill(tier.description, width=40),
218 textwrap.fill(' '.join([str(x.get_name(
219 )) for x in tier.get_tests()]), width=40)])
220 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
221 for tier in tiers_to_run:
224 def main(self, **kwargs):
225 """Entry point of class Runner"""
226 if 'noclean' in kwargs:
227 self.clean_flag = not kwargs['noclean']
228 if 'report' in kwargs:
229 self.report_flag = kwargs['report']
232 LOGGER.debug("Sourcing the credential file...")
233 self.source_envfile()
235 LOGGER.debug("Test args: %s", kwargs['test'])
236 if self.tiers.get_tier(kwargs['test']):
237 self.run_tier(self.tiers.get_tier(kwargs['test']))
238 elif self.tiers.get_test(kwargs['test']):
239 result = self.run_test(
240 self.tiers.get_test(kwargs['test']))
241 if result != testcase.TestCase.EX_OK:
242 LOGGER.error("The test case '%s' failed.",
244 self.overall_result = Result.EX_ERROR
245 elif kwargs['test'] == "all":
248 LOGGER.error("Unknown test case or tier '%s', or not "
249 "supported by the given scenario '%s'.",
251 env.get('DEPLOY_SCENARIO'))
252 LOGGER.debug("Available tiers are:\n\n%s",
254 return Result.EX_ERROR
257 except BlockingTestFailed:
259 except Exception: # pylint: disable=broad-except
260 LOGGER.exception("Failures when running testcase(s)")
261 self.overall_result = Result.EX_ERROR
262 if not self.tiers.get_test(kwargs['test']):
263 self.summary(self.tiers.get_tier(kwargs['test']))
264 LOGGER.info("Execution exit value: %s", self.overall_result)
265 return self.overall_result
267 def summary(self, tier=None):
268 """To generate functest report showing the overall results"""
269 msg = prettytable.PrettyTable(
270 header_style='upper', padding_width=5,
271 field_names=['env var', 'value'])
272 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
274 msg.add_row([env_var, env.get(env_var)])
275 LOGGER.info("Deployment description:\n\n%s\n", msg)
276 msg = prettytable.PrettyTable(
277 header_style='upper', padding_width=5,
278 field_names=['test case', 'project', 'tier',
279 'duration', 'result'])
280 tiers = [tier] if tier else self.tiers.get_tiers()
281 for each_tier in tiers:
282 for test in each_tier.get_tests():
284 test_case = self.executed_test_cases[test.get_name()]
286 msg.add_row([test.get_name(), test.get_project(),
287 each_tier.get_name(), "00:00", "SKIP"])
289 result = 'PASS' if(test_case.is_successful(
290 ) == test_case.EX_OK) else 'FAIL'
292 [test_case.case_name, test_case.project_name,
293 self.tiers.get_tier_name(test_case.case_name),
294 test_case.get_duration(), result])
295 for test in each_tier.get_skipped_test():
296 msg.add_row([test.get_name(), test.get_project(),
297 each_tier.get_name(), "00:00", "SKIP"])
298 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Script entry point: configure logging, parse argv and run.

    Returns the integer value of the overall Result, suitable for use
    as the process exit code.
    """
    # Restored: the `def main():` header and the `runner = Runner()`
    # instantiation were missing from this copy (the last line used an
    # undefined `runner`).
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value