3 # Copyright (c) 2016 Ericsson AB and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
""" The entry point for running tests:
11 1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
32 from xtesting.ci import tier_builder
33 from xtesting.core import testcase
34 from xtesting.utils import constants
35 from xtesting.utils import env
# Module-level logger keyed by the module path so records can be filtered
# per component via the packaged ci/logging.ini configuration.
LOGGER = logging.getLogger('xtesting.ci.run_tests')
class Result(enum.Enum):
    """The overall result in enumerated type.

    Aggregated exit status of a whole run (members such as EX_OK and
    EX_ERROR are referenced by Runner); main() returns the member's
    ``value`` as the process exit code.
    """
    # pylint: disable=too-few-public-methods
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking does not pass."""
class TestNotEnabled(Exception):
    """Raised when attempting to run a test case that is disabled."""
class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

        # Build the CLI: -t/--test selects a test case or a tier,
        # -n/--noclean skips OpenStack resource cleanup after each test and
        # -r/--report pushes the results to the database (both default to
        # false per their help strings).
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",

    def parse_args(self, argv=None):
        # Parse argv (sys.argv when None) and return the options as a plain
        # dict so callers can use **kwargs forwarding (see Runner.main).
        It can call sys.exit if arguments are incorrect.
        the arguments from cmdline
        return vars(self.parser.parse_args(argv))
        # Map of test-case name -> executed TestCase instance, filled by
        # run_test() and read back by run_tier()/summary().
        self.executed_test_cases = {}
        # Aggregated status over the whole run; downgraded to EX_ERROR as
        # soon as any test case fails.
        self.overall_result = Result.EX_OK
        # Cleanup flag; inverted by --noclean in main().
        self.clean_flag = True
        # Push results to the DB only when --report was given (see main()).
        self.report_flag = False
        # Tier/testcase definitions come from the packaged ci/testcases.yaml.
        self.tiers = tier_builder.TierBuilder(
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg.

        Exports every KEY=VALUE assignment found in rc_file into
        os.environ, dropping a leading shell 'export ' keyword and any
        surrounding quotes/spaces around key and value.
        """
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
        with open(rc_file, "r") as rcfd:
                # Keep only lines that contain an assignment; strip the
                # trailing quote/newline and split into name/value on '='.
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
                    os.environ[key] = value
            # Log the whole sourced file content for traceability.
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())
    def get_dict_by_test(testname):
        """Look up the dict describing *testname* in ci/testcases.yaml."""
        # pylint: disable=bad-continuation,missing-docstring
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        # Scan every tier's testcases until a matching case_name is found.
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
        # Reached only when no testcase matched the requested name.
        LOGGER.error('Project %s is not defined in testcases.yaml', testname)
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml."""
            dic_testcase = Runner.get_dict_by_test(testname)
            # No matching testcase entry could be retrieved.
            LOGGER.error("Cannot get %s's config options", testname)
        elif 'run' in dic_testcase:
            return dic_testcase['run']
        # Any failure while reading/parsing the yaml is logged, not raised.
        except Exception: # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
    def run_test(self, test):
        """Run one test case.

        Dynamically imports the module/class named in the testcase's 'run'
        block, instantiates it with the full testcases.yaml options, runs
        it and may push the result to the database.

        Raises:
            TestNotEnabled: when the test case is disabled in the config.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        # Pessimistic default: anything short of a completed run reports
        # EX_RUN_ERROR.
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
            # Resolve the implementation class from its dotted module path.
            module = importlib.import_module(run_dict['module'])
            cls = getattr(module, run_dict['class'])
            test_dict = Runner.get_dict_by_test(test.get_name())
            test_case = cls(**test_dict)
            # Keep the instance so run_tier()/summary() can query its status.
            self.executed_test_cases[test.get_name()] = test_case
                kwargs = run_dict['args']
            test_case.run(**kwargs)
                test_case.push_to_db()
            result = test_case.is_successful()
            LOGGER.info("Test result:\n\n%s\n", test_case)
            LOGGER.exception("Cannot import module %s", run_dict['module'])
        except AttributeError:
            LOGGER.exception("Cannot get class %s", run_dict['class'])
            # Import/lookup failures are fatal for this test case.
            raise Exception("Cannot import the class for the test case.")
    def run_tier(self, tier):
        """Run all the test cases of one tier and track the overall result."""
        tier_name = tier.get_name()
        tests = tier.get_tests()
            # An empty tier for this scenario is treated as an error.
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
            LOGGER.info("Running tier '%s'", tier_name)
                # Reuse the instance stored by run_test() to read its status.
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    self.overall_result = Result.EX_ERROR
                    # A failing blocking test aborts the whole tier at once.
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
        return self.overall_result
        """Run all available testcases"""
        # Pretty summary of every tier that is about to be executed.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'description',
        for tier in self.tiers.get_tiers():
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                             )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        # Execute the selected tiers in their declared order.
        for tier in tiers_to_run:
    def main(self, **kwargs):
        """Entry point of class Runner.

        Applies the parsed CLI flags, sources the env file, then dispatches
        to run_tier()/run_test()/run_all() depending on kwargs['test'].
        Returns a Result enum member reflecting the overall outcome.
        """
        # --noclean inverts the cleanup flag; --report enables DB reporting.
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        LOGGER.info("Deployment description:\n\n%s\n", env.string())
        self.source_envfile()
            LOGGER.debug("Test args: %s", kwargs['test'])
            # A known tier name runs the whole tier; a known test name runs
            # a single case; "all" runs everything; anything else is an error.
            if self.tiers.get_tier(kwargs['test']):
                self.run_tier(self.tiers.get_tier(kwargs['test']))
            elif self.tiers.get_test(kwargs['test']):
                result = self.run_test(
                    self.tiers.get_test(kwargs['test']))
                if result != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.",
                    self.overall_result = Result.EX_ERROR
            elif kwargs['test'] == "all":
                LOGGER.error("Unknown test case or tier '%s', or not "
                             "supported by the given scenario '%s'.",
                             env.get('DEPLOY_SCENARIO'))
                LOGGER.debug("Available tiers are:\n\n%s",
                return Result.EX_ERROR
        except BlockingTestFailed:
        except Exception: # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # Print the summary table unless a single test case was requested.
        if not self.tiers.get_test(kwargs['test']):
            self.summary(self.tiers.get_tier(kwargs['test']))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
    def summary(self, tier=None):
        """To generate xtesting report showing the overall results.

        When *tier* is given only that tier is reported; otherwise every
        tier known to the TierBuilder is included.
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                    test_case = self.executed_test_cases[test.get_name()]
                    # Tests without a stored instance are reported as SKIP
                    # with a zero duration.
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                    result = 'PASS' if(test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            # Explicitly skipped tests are always listed as SKIP.
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)
        # Results directory must exist before logging/tests write into it;
        # an already-existing directory (EEXIST) is not an error.
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    # Configure logging from the packaged ini and route warnings to logging.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    # Runner.main returns a Result enum member; expose its integer value.
    return runner.main(**args).value