Return all status when running tiers
[functest-xtesting.git] / xtesting / ci / run_tests.py
#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

10 """ The entry of running tests:
11 1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """

import argparse
import errno
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import enum
import pkg_resources
import prettytable
import six
import yaml

from xtesting.ci import tier_builder
from xtesting.core import testcase
from xtesting.utils import constants
from xtesting.utils import env

LOGGER = logging.getLogger('xtesting.ci.run_tests')


class Result(enum.Enum):
    """The overall result as an enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""


class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline as a dict
        """
        return vars(self.parser.parse_args(argv))


class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tier_builder.TierBuilder(
            env.get('INSTALLER_TYPE'),
            env.get('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg"""
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "=".join(var[1:]))
                    os.environ[key] = value
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())

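    # Sketch of an env file accepted by source_envfile() (the values are
    # illustrative; every KEY=value pair is exported to os.environ):
    #
    #   export INSTALLER_TYPE=unknown
    #   export DEPLOY_SCENARIO=os-nosdn-nofeature-noha
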
    @staticmethod
    def get_dict_by_test(testname):
        """Return the dict of the given test case in testcases.yaml"""
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        LOGGER.error('Test case %s is not defined in testcases.yaml',
                     testname)
        return None

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

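    # Shape of the 'run' block consumed by get_run_dict() (the module, class
    # and args keys are what run_test() reads; the values are illustrative):
    #
    #   run:
    #     module: 'my_package.my_module'
    #     class: 'MyTestCase'
    #     args:
    #       arg1: value1
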
    def run_test(self, test):
        """Run one test case"""
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                kwargs = run_dict.get('args', {})
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                result = test_case.is_successful()
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception(
                "Cannot get the run configuration of {}".format(
                    test.get_name()))
        return result

    def run_tier(self, tier):
        """Run one tier"""
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
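        # The accumulated result is returned (not only raised on blocking
        # failures) so that callers get the status of the whole tier.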
        return self.overall_result

    def run_all(self):
        """Run all available testcases"""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
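        # CI_LOOP is applied as a regex against each tier's ci_loop value,
        # e.g. CI_LOOP=daily matches a tier whose ci_loop is
        # '(daily)|(weekly)' (illustrative values).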
        for tier in self.tiers.get_tiers():
            ci_loop = env.get('CI_LOOP')
            if (tier.get_tests() and
                    re.search(ci_loop, tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([
                                 str(x.get_name())
                                 for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner"""
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            if 'test' in kwargs:
                LOGGER.debug("Test args: %s", kwargs['test'])
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """Generate the xtesting report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = ('PASS' if test_case.is_successful() ==
                              test_case.EX_OK else 'FAIL')
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)


def main():
    """Entry point"""
    try:
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
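

# Allow running this module directly, as the shebang suggests (the packaged
# console script is assumed to call main() instead):
if __name__ == "__main__":
    sys.exit(main())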