3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
30 import functest.ci.tier_builder as tb
31 import functest.core.testcase as testcase
33 LOGGER = logging.getLogger('functest.ci.run_tests')
34 ENV_FILE = "/home/opnfv/functest/conf/env_file"
class Result(enum.Enum):
    """The overall result in enumerated type.

    ``main()`` returns ``.value`` as the process exit code, so members
    must be small integers (see the module-level entry point).
    """
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    # NOTE(review): members were elided in the provided excerpt and
    # reconstructed; any nonzero value signals failure to the shell.
    EX_ERROR = -1
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking does not pass."""
class TestNotEnabled(Exception):
    """Raised when a requested test case is disabled in testcases.yaml."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        # NOTE(review): the "def __init__" line and the store_true actions
        # were elided in the provided excerpt and reconstructed here.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
87 self.executed_test_cases = {}
88 self.overall_result = Result.EX_OK
89 self.clean_flag = True
90 self.report_flag = False
91 self.tiers = tb.TierBuilder(
92 os.environ.get('INSTALLER_TYPE', None),
93 os.environ.get('DEPLOY_SCENARIO', None),
94 pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
97 def source_envfile(rc_file=ENV_FILE):
98 """Source the env file passed as arg"""
99 with open(rc_file, "r") as rcfd:
101 var = (line.rstrip('"\n').replace('export ', '').split(
102 "=") if re.search(r'(.*)=(.*)', line) else None)
103 # The two next lines should be modified as soon as rc_file
104 # conforms with common rules. Be aware that it could induce
105 # issues if value starts with '
107 key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
108 value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
109 os.environ[key] = value
112 def get_dict_by_test(testname):
113 # pylint: disable=bad-continuation,missing-docstring
114 with open(pkg_resources.resource_filename(
115 'functest', 'ci/testcases.yaml')) as tyaml:
116 testcases_yaml = yaml.safe_load(tyaml)
117 for dic_tier in testcases_yaml.get("tiers"):
118 for dic_testcase in dic_tier['testcases']:
119 if dic_testcase['case_name'] == testname:
121 LOGGER.error('Project %s is not defined in testcases.yaml', testname)
125 def get_run_dict(testname):
126 """Obtain the 'run' block of the testcase from testcases.yaml"""
128 dic_testcase = Runner.get_dict_by_test(testname)
130 LOGGER.error("Cannot get %s's config options", testname)
131 elif 'run' in dic_testcase:
132 return dic_testcase['run']
134 except Exception: # pylint: disable=broad-except
135 LOGGER.exception("Cannot get %s's config options", testname)
138 def run_test(self, test):
139 """Run one test case"""
140 if not test.is_enabled():
141 raise TestNotEnabled(
142 "The test case {} is not enabled".format(test.get_name()))
143 LOGGER.info("Running test case '%s'...", test.get_name())
144 result = testcase.TestCase.EX_RUN_ERROR
145 run_dict = self.get_run_dict(test.get_name())
148 module = importlib.import_module(run_dict['module'])
149 cls = getattr(module, run_dict['class'])
150 test_dict = Runner.get_dict_by_test(test.get_name())
151 test_case = cls(**test_dict)
152 self.executed_test_cases[test.get_name()] = test_case
154 kwargs = run_dict['args']
155 test_case.run(**kwargs)
159 test_case.push_to_db()
160 if test.get_project() == "functest":
161 result = test_case.is_successful()
163 result = testcase.TestCase.EX_OK
164 LOGGER.info("Test result:\n\n%s\n", test_case)
168 LOGGER.exception("Cannot import module %s", run_dict['module'])
169 except AttributeError:
170 LOGGER.exception("Cannot get class %s", run_dict['class'])
172 raise Exception("Cannot import the class for the test case.")
175 def run_tier(self, tier):
177 tier_name = tier.get_name()
178 tests = tier.get_tests()
180 LOGGER.info("There are no supported test cases in this tier "
181 "for the given scenario")
182 self.overall_result = Result.EX_ERROR
184 LOGGER.info("Running tier '%s'", tier_name)
187 test_case = self.executed_test_cases[test.get_name()]
188 if test_case.is_successful() != testcase.TestCase.EX_OK:
189 LOGGER.error("The test case '%s' failed.", test.get_name())
190 if test.get_project() == "functest":
191 self.overall_result = Result.EX_ERROR
192 if test.is_blocking():
193 raise BlockingTestFailed(
194 "The test case {} failed and is blocking".format(
196 return self.overall_result
199 """Run all available testcases"""
201 msg = prettytable.PrettyTable(
202 header_style='upper', padding_width=5,
203 field_names=['tiers', 'order', 'CI Loop', 'description',
205 for tier in self.tiers.get_tiers():
206 ci_loop = os.environ.get('CI_LOOP', None)
207 if (tier.get_tests() and ci_loop and
208 re.search(ci_loop, tier.get_ci_loop()) is not None):
209 tiers_to_run.append(tier)
210 msg.add_row([tier.get_name(), tier.get_order(),
212 textwrap.fill(tier.description, width=40),
213 textwrap.fill(' '.join([str(x.get_name(
214 )) for x in tier.get_tests()]), width=40)])
215 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
216 for tier in tiers_to_run:
219 def main(self, **kwargs):
220 """Entry point of class Runner"""
221 if 'noclean' in kwargs:
222 self.clean_flag = not kwargs['noclean']
223 if 'report' in kwargs:
224 self.report_flag = kwargs['report']
227 LOGGER.debug("Sourcing the credential file...")
228 self.source_envfile()
230 LOGGER.debug("Test args: %s", kwargs['test'])
231 if self.tiers.get_tier(kwargs['test']):
232 self.run_tier(self.tiers.get_tier(kwargs['test']))
233 elif self.tiers.get_test(kwargs['test']):
234 result = self.run_test(
235 self.tiers.get_test(kwargs['test']))
236 if result != testcase.TestCase.EX_OK:
237 LOGGER.error("The test case '%s' failed.",
239 self.overall_result = Result.EX_ERROR
240 elif kwargs['test'] == "all":
243 LOGGER.error("Unknown test case or tier '%s', or not "
244 "supported by the given scenario '%s'.",
246 os.environ.get('DEPLOY_SCENARIO', ""))
247 LOGGER.debug("Available tiers are:\n\n%s",
249 return Result.EX_ERROR
252 except BlockingTestFailed:
254 except Exception: # pylint: disable=broad-except
255 LOGGER.exception("Failures when running testcase(s)")
256 self.overall_result = Result.EX_ERROR
257 if not self.tiers.get_test(kwargs['test']):
258 self.summary(self.tiers.get_tier(kwargs['test']))
259 LOGGER.info("Execution exit value: %s", self.overall_result)
260 return self.overall_result
262 def summary(self, tier=None):
263 """To generate functest report showing the overall results"""
264 msg = prettytable.PrettyTable(
265 header_style='upper', padding_width=5,
266 field_names=['env var', 'value'])
267 for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
269 msg.add_row([env_var, os.environ.get(env_var, "")])
270 LOGGER.info("Deployment description:\n\n%s\n", msg)
271 msg = prettytable.PrettyTable(
272 header_style='upper', padding_width=5,
273 field_names=['test case', 'project', 'tier',
274 'duration', 'result'])
275 tiers = [tier] if tier else self.tiers.get_tiers()
276 for each_tier in tiers:
277 for test in each_tier.get_tests():
279 test_case = self.executed_test_cases[test.get_name()]
281 msg.add_row([test.get_name(), test.get_project(),
282 each_tier.get_name(), "00:00", "SKIP"])
284 result = 'PASS' if(test_case.is_successful(
285 ) == test_case.EX_OK) else 'FAIL'
287 [test_case.case_name, test_case.project_name,
288 self.tiers.get_tier_name(test_case.case_name),
289 test_case.get_duration(), result])
290 for test in each_tier.get_skipped_test():
291 msg.add_row([test.get_name(), test.get_project(),
292 each_tier.get_name(), "00:00", "SKIP"])
293 LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
    """Entry point: configure logging, parse argv, run and return exit code.

    Returns the integer value of the overall Result member, suitable as a
    process exit status.
    """
    # NOTE(review): the "def main():" line and the Runner() instantiation
    # were elided in the provided excerpt and reconstructed here.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value