Stop filtering the project when calculating results
[functest-xtesting.git] / xtesting / ci / run_tests.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2016 Ericsson AB and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """ The entry of running tests:
11 1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """
15
16 import argparse
17 import errno
18 import importlib
19 import logging
20 import logging.config
21 import os
22 import re
23 import sys
24 import textwrap
25 import pkg_resources
26
27 import enum
28 import prettytable
29 import six
30 import yaml
31
32 from xtesting.ci import tier_builder
33 from xtesting.core import testcase
34 from xtesting.utils import constants
35 from xtesting.utils import env
36
37 LOGGER = logging.getLogger('xtesting.ci.run_tests')
38
39
class Result(enum.Enum):
    """Overall exit statuses of a whole run, as an enumerated type."""
    # pylint: disable=too-few-public-methods
    # EX_OK mirrors the POSIX success exit code exposed by the os module.
    EX_OK = os.EX_OK
    EX_ERROR = -1
45
46
class BlockingTestFailed(Exception):
    """Exception raised when a blocking testcase fails."""
    # NOTE: the redundant 'pass' after the docstring was removed
    # (pylint unnecessary-pass); the docstring alone is a valid body.
50
51
class TestNotEnabled(Exception):
    """Exception raised when the requested testcase is not enabled."""
    # NOTE: the redundant 'pass' after the docstring was removed
    # (pylint unnecessary-pass); the docstring alone is a valid body.
55
56
class RunTestsParser(object):
    """Command-line parser dedicated to run_tests."""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # -t selects a single testcase or a whole tier; everything runs
        # when it is omitted.
        self.parser.add_argument(
            "-t", "--test", dest="test", action='store',
            help="Test case or tier (group of tests) to be executed. "
                 "It will run all the test if not specified.")
        self.parser.add_argument(
            "-n", "--noclean", action="store_true",
            help="Do not clean OpenStack resources after running each "
                 "test (default=false).")
        self.parser.add_argument(
            "-r", "--report", action="store_true",
            help="Push results to database (default=false).")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        parsed = self.parser.parse_args(argv)
        return vars(parsed)
84
85
class Runner(object):
    """Handle the whole life cycle of the testcases to be run.

    It loads the testcase descriptions (testcases.yaml), runs tiers or
    single testcases, optionally pushes results to the database and
    prints the final report.
    """

    def __init__(self):
        # testcase name -> TestCase instance, filled by run_test()
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tier_builder.TierBuilder(
            env.get('INSTALLER_TYPE'),
            env.get('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg into os.environ.

        Every line matching key=value (an optional leading 'export ' is
        dropped) is exported after stripping surrounding quotes and
        spaces. A missing file is not an error (debug log only).
        """
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
                    os.environ[key] = value
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())

    @staticmethod
    def get_dict_by_test(testname):
        """Return the testcases.yaml dict describing the given testcase.

        Returns:
            the testcase dict, or None (with an error log) if the
            testcase is not declared in any tier.
        """
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        # the lookup key is a testcase name, not a project name
        LOGGER.error('Testcase %s is not defined in testcases.yaml', testname)
        return None

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml.

        Returns:
            the 'run' dict, or None if the testcase or its 'run' block
            cannot be found.
        """
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one testcase and return its result status.

        The testcase object is instantiated from the module/class
        declared in its 'run' block, stored in executed_test_cases,
        optionally pushed to the database and cleaned.

        Raises:
            TestNotEnabled: if the testcase is disabled in testcases.yaml
            Exception: if no 'run' block can be found for the testcase
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                # Read 'args' via get() instead of catching KeyError
                # around the run() call: the previous code silently
                # re-ran the testcase without args whenever run(**kwargs)
                # itself raised a KeyError.
                kwargs = run_dict.get('args', {})
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                result = test_case.is_successful()
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run all the testcases of one tier.

        Any failed testcase turns the overall result into EX_ERROR
        (no more filtering on the project owning the testcase, so the
        behavior matches the single-testcase path in main()).

        Raises:
            BlockingTestFailed: if a failed testcase is blocking
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all the tiers enabled for the current CI loop."""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            ci_loop = env.get('CI_LOOP')
            # a tier runs only if it has testcases and matches CI_LOOP
            if (tier.get_tests() and
                    re.search(ci_loop, tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner.

        kwargs may contain:
            test: testcase or tier name, or 'all' (optional)
            noclean: skip the cleaning step if True
            report: push the results to the database if True

        Returns:
            Result.EX_OK or Result.EX_ERROR
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        # get() avoids a KeyError below when main() is called
        # programmatically without any 'test' argument
        test_arg = kwargs.get('test')
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            if 'test' in kwargs:
                LOGGER.debug("Test args: %s", test_arg)
                if self.tiers.get_tier(test_arg):
                    self.run_tier(self.tiers.get_tier(test_arg))
                elif self.tiers.get_test(test_arg):
                    result = self.run_test(
                        self.tiers.get_test(test_arg))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     test_arg)
                        self.overall_result = Result.EX_ERROR
                elif test_arg == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 test_arg,
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # run_tier already set overall_result before raising
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # print the summary except when a single testcase was requested
        if not self.tiers.get_test(test_arg):
            self.summary(self.tiers.get_tier(test_arg))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """Log the xtesting report showing the overall results.

        Only the given tier is reported when tier is passed, else all
        tiers are.
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # the testcase was never run: report it as skipped
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if (test_case.is_successful()
                                        == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)
291
292
def main():
    """Entry point: prepare the results directory and logging, then run."""
    try:
        os.makedirs('/var/lib/xtesting/results/')
    except OSError as ex:
        # an already existing directory is fine; any other error is fatal
        if ex.errno != errno.EEXIST:
            six.print_("Cannot create /var/lib/xtesting/results/")
            return testcase.TestCase.EX_RUN_ERROR
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    args = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**args).value