3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
""" The entry of running tests:
1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
2) Execute the common operations on every testcase (run, push results to db...)
3) Return the right status code
"""
import argparse
import enum
import errno
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import pkg_resources
import prettytable
import six
import yaml

from xtesting.ci import tier_builder
from xtesting.core import testcase
from xtesting.utils import constants
from xtesting.utils import env

LOGGER = logging.getLogger('xtesting.ci.run_tests')
class Result(enum.Enum):
    """The overall result in enumerated type.

    ``main()`` returns ``.value`` as the process exit code, so ``EX_OK``
    maps to the conventional success code and ``EX_ERROR`` to failure.
    Both members are referenced by Runner (overall_result handling).
    """
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails."""
class RunTestsParser(object):
    """Parser to run tests.

    Wraps an argparse.ArgumentParser exposing -t/--test, -n/--noclean
    and -r/--report options.
    """
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        # The next two options are boolean flags: Runner.main() consumes
        # them as booleans (clean_flag = not noclean, report_flag = report).
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline as a dict
        """
        return vars(self.parser.parse_args(argv))
85 self.executed_test_cases = {}
86 self.overall_result = Result.EX_OK
87 self.clean_flag = True
88 self.report_flag = False
89 self.tiers = tier_builder.TierBuilder(
90 pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))
93 def source_envfile(rc_file=constants.ENV_FILE):
94 """Source the env file passed as arg"""
95 if not os.path.isfile(rc_file):
96 LOGGER.debug("No env file %s found", rc_file)
98 with open(rc_file, "r") as rcfd:
100 var = (line.rstrip('"\n').replace('export ', '').split(
101 "=") if re.search(r'(.*)=(.*)', line) else None)
102 # The two next lines should be modified as soon as rc_file
103 # conforms with common rules. Be aware that it could induce
104 # issues if value starts with '
106 key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
107 value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
108 os.environ[key] = value
110 LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())
113 def get_dict_by_test(testname):
114 # pylint: disable=bad-continuation,missing-docstring
115 with open(pkg_resources.resource_filename(
116 'xtesting', 'ci/testcases.yaml')) as tyaml:
117 testcases_yaml = yaml.safe_load(tyaml)
118 for dic_tier in testcases_yaml.get("tiers"):
119 for dic_testcase in dic_tier['testcases']:
120 if dic_testcase['case_name'] == testname:
122 LOGGER.error('Project %s is not defined in testcases.yaml', testname)
126 def get_run_dict(testname):
127 """Obtain the 'run' block of the testcase from testcases.yaml"""
129 dic_testcase = Runner.get_dict_by_test(testname)
131 LOGGER.error("Cannot get %s's config options", testname)
132 elif 'run' in dic_testcase:
133 return dic_testcase['run']
135 except Exception: # pylint: disable=broad-except
136 LOGGER.exception("Cannot get %s's config options", testname)
139 def run_test(self, test):
140 """Run one test case"""
141 if not test.is_enabled() or test.is_skipped():
142 msg = prettytable.PrettyTable(
143 header_style='upper', padding_width=5,
144 field_names=['test case', 'project', 'duration',
146 msg.add_row([test.get_name(), test.get_project(), "00:00", "SKIP"])
147 LOGGER.info("Test result:\n\n%s\n", msg)
148 return testcase.TestCase.EX_TESTCASE_SKIPPED
149 result = testcase.TestCase.EX_RUN_ERROR
150 run_dict = self.get_run_dict(test.get_name())
153 LOGGER.info("Loading test case '%s'...", test.get_name())
154 module = importlib.import_module(run_dict['module'])
155 cls = getattr(module, run_dict['class'])
156 test_dict = Runner.get_dict_by_test(test.get_name())
157 test_case = cls(**test_dict)
158 self.executed_test_cases[test.get_name()] = test_case
159 test_case.check_requirements()
160 if test_case.is_skipped:
161 LOGGER.info("Skipping test case '%s'...", test.get_name())
162 LOGGER.info("Test result:\n\n%s\n", test_case)
163 return testcase.TestCase.EX_TESTCASE_SKIPPED
164 LOGGER.info("Running test case '%s'...", test.get_name())
166 kwargs = run_dict['args']
167 test_case.run(**kwargs)
171 test_case.push_to_db()
172 result = test_case.is_successful()
173 LOGGER.info("Test result:\n\n%s\n", test_case)
177 LOGGER.exception("Cannot import module %s", run_dict['module'])
178 except AttributeError:
179 LOGGER.exception("Cannot get class %s", run_dict['class'])
180 except Exception: # pylint: disable=broad-except
182 "\n\nPlease fix the testcase %s.\n"
183 "All exceptions should be caught by the testcase instead!"
187 raise Exception("Cannot import the class for the test case.")
190 def run_tier(self, tier):
192 tests = tier.get_tests()
194 LOGGER.info("There are no supported test cases in this tier "
195 "for the given scenario")
196 self.overall_result = Result.EX_ERROR
200 test_case = self.executed_test_cases[test.get_name()]
201 if test_case.is_successful() == test_case.EX_TESTCASE_FAILED:
202 LOGGER.error("The test case '%s' failed.", test.get_name())
203 self.overall_result = Result.EX_ERROR
204 if test.is_blocking():
205 raise BlockingTestFailed(
206 "The test case {} failed and is blocking".format(
208 return self.overall_result
211 """Run all available testcases"""
213 msg = prettytable.PrettyTable(
214 header_style='upper', padding_width=5,
215 field_names=['tiers', 'order', 'description',
217 for tier in self.tiers.get_tiers():
219 tiers_to_run.append(tier)
220 msg.add_row([tier.get_name(), tier.get_order(),
221 textwrap.fill(tier.description, width=40),
222 textwrap.fill(' '.join([str(x.get_name(
223 )) for x in tier.get_tests()]), width=40)])
224 LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
225 for tier in tiers_to_run:
228 def main(self, **kwargs):
229 """Entry point of class Runner"""
230 if 'noclean' in kwargs:
231 self.clean_flag = not kwargs['noclean']
232 if 'report' in kwargs:
233 self.report_flag = kwargs['report']
235 LOGGER.info("Deployment description:\n\n%s\n", env.string())
236 self.source_envfile()
238 LOGGER.debug("Test args: %s", kwargs['test'])
239 if self.tiers.get_tier(kwargs['test']):
240 self.run_tier(self.tiers.get_tier(kwargs['test']))
241 elif self.tiers.get_test(kwargs['test']):
242 result = self.run_test(
243 self.tiers.get_test(kwargs['test']))
244 if result == testcase.TestCase.EX_TESTCASE_FAILED:
245 LOGGER.error("The test case '%s' failed.",
247 self.overall_result = Result.EX_ERROR
248 elif kwargs['test'] == "all":
251 LOGGER.error("Unknown test case or tier '%s', or not "
252 "supported by the given scenario '%s'.",
254 env.get('DEPLOY_SCENARIO'))
255 LOGGER.debug("Available tiers are:\n\n%s",
257 return Result.EX_ERROR
260 except BlockingTestFailed:
262 except Exception: # pylint: disable=broad-except
263 LOGGER.exception("Failures when running testcase(s)")
264 self.overall_result = Result.EX_ERROR
265 if not self.tiers.get_test(kwargs['test']):
266 self.summary(self.tiers.get_tier(kwargs['test']))
267 LOGGER.info("Execution exit value: %s", self.overall_result)
268 return self.overall_result
270 def summary(self, tier=None):
271 """To generate xtesting report showing the overall results"""
272 msg = prettytable.PrettyTable(
273 header_style='upper', padding_width=5,
274 field_names=['test case', 'project', 'tier',
275 'duration', 'result'])
276 tiers = [tier] if tier else self.tiers.get_tiers()
277 for each_tier in tiers:
278 for test in each_tier.get_tests():
280 test_case = self.executed_test_cases[test.get_name()]
282 msg.add_row([test.get_name(), test.get_project(),
283 each_tier.get_name(), "00:00", "SKIP"])
285 if test_case.is_skipped:
288 result = 'PASS' if(test_case.is_successful(
289 ) == test_case.EX_OK) else 'FAIL'
291 [test_case.case_name, test_case.project_name,
292 self.tiers.get_tier_name(test_case.case_name),
293 test_case.get_duration(), result])
294 for test in each_tier.get_skipped_test():
295 msg.add_row([test.get_name(), test.get_project(),
296 each_tier.get_name(), "00:00", "SKIP"])
297 LOGGER.info("Xtesting report:\n\n%s\n", msg)
def main():
    """Entry point of the run_tests script.

    Prepares the results directory and logging, then delegates to
    Runner.main(); returns an integer exit code.
    """
    try:
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        # a pre-existing results directory is fine; anything else is fatal
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value