#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

10 """ The entry of running tests:
11 1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """

import argparse
import errno
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap

import enum
import pkg_resources
import prettytable
import six
import yaml

from xtesting.ci import tier_builder
from xtesting.core import testcase
from xtesting.utils import constants
from xtesting.utils import env

LOGGER = logging.getLogger('xtesting.ci.run_tests')


class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Exception raised when a blocking test fails"""


class TestNotEnabled(Exception):
    """Exception raised when the test is not enabled"""


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
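
    # For instance, 'run_tests -t first -r' is parsed into a dict such as
    # {'test': 'first', 'noclean': False, 'report': True}
    # (illustrative values).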


class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tier_builder.TierBuilder(
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg"""
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                # split on the first '=' only so that values containing
                # '=' are preserved
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=", 1) if re.search(r'(.*)=(.*)', line) else None)
                # The next two lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that they could induce
                # issues if a value starts with a quote.
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', var[1])
                    os.environ[key] = value
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())
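
    # Sketch of the kind of env file this parser accepts (the actual path
    # comes from constants.ENV_FILE; variable names are only examples):
    #
    #   export DEPLOY_SCENARIO=os-nosdn-nofeature-noha
    #   export INSTALLER_TYPE=unknown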

    @staticmethod
    def get_dict_by_test(testname):
        """Return the testcase dict of the given name from testcases.yaml"""
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        LOGGER.error('Test case %s is not defined in testcases.yaml',
                     testname)
        return None
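
    # The testcases.yaml layout assumed by the accesses above, trimmed to
    # the keys this module reads (all names and values are illustrative):
    #
    #   tiers:
    #     - name: mytier
    #       order: 0
    #       description: 'An example tier'
    #       testcases:
    #         - case_name: mycase
    #           project_name: myproject
    #           run:
    #             module: 'mypackage.mymodule'
    #             class: 'MyTestCase'
    #             args:
    #               key: value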

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case"""
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                # instantiate the class declared in the 'run' block, passing
                # the whole testcase dict as constructor kwargs
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                # 'args' is optional in the 'run' block; dict.get() avoids
                # a broad KeyError catch which would also mask a KeyError
                # raised inside run() itself
                test_case.run(**run_dict.get('args', {}))
                if self.report_flag:
                    test_case.push_to_db()
                result = test_case.is_successful()
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception(
                "Cannot get the 'run' block of test case {}".format(
                    test.get_name()))
        return result
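
    # A minimal sketch of a driver class run_test() can load, assuming the
    # xtesting.core.testcase.TestCase contract (run() sets result,
    # start_time and stop_time, then returns EX_OK); MyTestCase and its
    # module path are made-up names:
    #
    #   import time
    #   from xtesting.core import testcase
    #
    #   class MyTestCase(testcase.TestCase):
    #       def run(self, **kwargs):
    #           self.start_time = time.time()
    #           self.result = 100
    #           self.stop_time = time.time()
    #           return self.EX_OK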

    def run_tier(self, tier):
        """Run one tier"""
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all available testcases"""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'description', 'testcases'])
        for tier in self.tiers.get_tiers():
            if tier.get_tests():
                tiers_to_run.append(tier)
                msg.add_row(
                    [tier.get_name(), tier.get_order(),
                     textwrap.fill(tier.description, width=40),
                     textwrap.fill(' '.join(
                         str(x.get_name()) for x in tier.get_tests()),
                         width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner"""
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            # run everything when -t/--test is not given (argparse sets the
            # key to None in that case)
            if kwargs.get('test'):
                LOGGER.debug("Test args: %s", kwargs['test'])
                # a tier name takes precedence over a testcase name
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s", self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # the overall result has already been set by run_tier()
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # print the summary unless a single testcase was run
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
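
    # Result.EX_OK.value equals os.EX_OK (0 on POSIX), so the enum returned
    # here translates into a process exit code via '.value', as main() does
    # below.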

    def summary(self, tier=None):
        """Generate the xtesting report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    status = test_case.is_successful()
                    result = 'PASS' if status == test_case.EX_OK else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)


def main():
    """Entry point"""
    try:
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
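
# This module is usually wrapped by a console script whose entry point turns
# the returned value into the process exit code. If direct execution were
# needed, a guard along these lines would do (a sketch, not part of the
# module):
#
#   if __name__ == '__main__':
#       sys.exit(main())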