#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""The entry point for running tests:
1) Parse xtesting/ci/testcases.yaml to determine which testcase(s) to run
2) Execute the common operations on every testcase (run, push results to
   the database, ...)
3) Return the right status code
"""
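
# Example invocations, assuming the 'run_tests' console script installed by
# this package (testcase/tier names below are illustrative):
#   run_tests -t all          # run every tier defined in testcases.yaml
#   run_tests -t healthcheck  # run a single tier
#   run_tests -t first -r     # run one testcase and push its results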

import argparse
import errno
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import enum
import pkg_resources
import prettytable
import six
import yaml

from xtesting.ci import tier_builder
from xtesting.core import testcase
from xtesting.utils import constants
from xtesting.utils import env

LOGGER = logging.getLogger('xtesting.ci.run_tests')


class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Exception raised when a blocking test fails"""


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. All tests are run "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))


class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tier_builder.TierBuilder(
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg"""
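        # Lines are expected to look like shell assignments, e.g.
        # (illustrative values):
        #   export NODE_NAME="pod1"
        #   DEPLOY_SCENARIO=os-nosdn-nofeature-ha
        # The leading 'export ' keyword and surrounding quotes are stripped
        # before the pair is written into os.environ.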
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The next two lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if the value starts with a quote character.
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
                    os.environ[key] = value
            # Rewind so the whole file can be echoed in the log below.
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())

    @staticmethod
    def get_dict_by_test(testname):
        """Return the testcase dict from testcases.yaml"""
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        LOGGER.error('Test case %s is not defined in testcases.yaml',
                     testname)
        return None

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
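        # A typical 'run' block in testcases.yaml looks like the following
        # sketch (the module/class/args values are illustrative):
        #   run:
        #     module: 'xtesting.samples.first'
        #     class: 'Test'
        #     args:
        #       foo: bar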
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case"""
        if not test.is_enabled() or test.is_skipped():
            msg = prettytable.PrettyTable(
                header_style='upper', padding_width=5,
                field_names=['test case', 'project', 'duration',
                             'result'])
            msg.add_row([test.get_name(), test.get_project(), "00:00", "SKIP"])
            LOGGER.info("Test result:\n\n%s\n", msg)
            return testcase.TestCase.EX_TESTCASE_SKIPPED
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                LOGGER.info("Loading test case '%s'...", test.get_name())
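                # The testcase class is resolved dynamically from the 'run'
                # block of testcases.yaml and instantiated with the whole
                # testcase dict as kwargs.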
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                test_case.check_requirements()
                if test_case.is_skipped:
                    LOGGER.info("Skipping test case '%s'...", test.get_name())
                    LOGGER.info("Test result:\n\n%s\n", test_case)
                    return testcase.TestCase.EX_TESTCASE_SKIPPED
                LOGGER.info("Running test case '%s'...", test.get_name())
                try:
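                    # 'args' is optional in the 'run' block; fall back to
                    # run() without kwargs when it is missing.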
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                result = test_case.is_successful()
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
            except Exception:  # pylint: disable=broad-except
                LOGGER.exception(
                    "\n\nPlease fix the testcase %s.\n"
                    "All exceptions should be caught by the testcase instead!"
                    "\n\n",
                    test.get_name())
        else:
            raise Exception(
                "Cannot get the 'run' block of test case {} from "
                "testcases.yaml".format(test.get_name()))
        return result

    def run_tier(self, tier):
        """Run one tier"""
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() == test_case.EX_TESTCASE_FAILED:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    self.overall_result = Result.EX_ERROR
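                    # A failed blocking testcase aborts the remaining tests
                    # of the tier (and of the whole run).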
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all available testcases"""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            if tier.get_tests():
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join(
                                 [str(x.get_name())
                                  for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner"""
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            if kwargs.get('test'):
                LOGGER.debug("Test args: %s", kwargs['test'])
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result == testcase.TestCase.EX_TESTCASE_FAILED:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # overall_result was already set to EX_ERROR in run_tier().
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # Skip the summary when a single testcase was run: its own report
        # has already been printed.
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """Generate the Xtesting report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
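                    # The testcase was never executed: it was disabled, or a
                    # blocking failure aborted the run before reaching it.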
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    if test_case.is_skipped:
                        result = 'SKIP'
                    else:
                        result = ('PASS' if test_case.is_successful() ==
                                  test_case.EX_OK else 'FAIL')
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)


def main():
    """Entry point"""
    try:
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
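

# Direct execution is not the packaged entry point (a console script normally
# calls main()); this guard is a minimal convenience sketch, not upstream code.
if __name__ == '__main__':
    sys.exit(main())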