Add xtesting in upper-constraints.txt
[functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2016 Ericsson AB and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """
15
16 import argparse
17 import importlib
18 import logging
19 import logging.config
20 import os
21 import re
22 import sys
23 import textwrap
24 import pkg_resources
25
26 import enum
27 import prettytable
28 import yaml
29
30 from functest.ci import tier_builder
31 from functest.core import testcase
32 from functest.utils import constants
33 from functest.utils import env
34
# Module-level logger named after the module's dotted path so its records
# propagate through the "functest" logging hierarchy (configured from
# ci/logging.ini in main()).
LOGGER = logging.getLogger('functest.ci.run_tests')
36
37
class Result(enum.Enum):
    """Overall exit status of a whole run, as an enumerated type."""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK  # success: every blocking testcase passed
    EX_ERROR = -1     # at least one failure or internal error
43
44
class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
    # NOTE: the redundant `pass` after the docstring was removed
    # (pylint W0107 unnecessary-pass); the docstring alone is a valid body.
48
49
class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
    # NOTE: the redundant `pass` after the docstring was removed
    # (pylint W0107 unnecessary-pass); the docstring alone is a valid body.
53
54
class RunTestsParser(object):
    """Thin command-line front end exposing the runner's three options."""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        """Declare -t/--test, -n/--noclean and -r/--report."""
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            "-t", "--test", dest="test", action='store',
            help=("Test case or tier (group of tests) to be executed. "
                  "It will run all the test if not specified."))
        self.parser.add_argument(
            "-n", "--noclean", action="store_true",
            help=("Do not clean OpenStack resources after running each "
                  "test (default=false)."))
        self.parser.add_argument(
            "-r", "--report", action="store_true",
            help="Push results to database (default=false).")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
82
83
class Runner(object):
    """Run the testcases declared in functest/ci/testcases.yaml.

    The runner executes one testcase, one tier (ordered group of
    testcases) or all tiers, optionally cleaning OpenStack resources
    after each testcase and pushing results to the result database.
    """

    def __init__(self):
        # case_name -> executed TestCase object, filled as tests run and
        # read back by run_tier() and summary().
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        # Tiers filtered for the current installer and deploy scenario.
        self.tiers = tier_builder.TierBuilder(
            env.get('INSTALLER_TYPE'),
            env.get('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Export into os.environ the variables found in rc_file.

        A missing file is not an error: it is skipped with a debug log
        so running without an env file stays supported.
        """
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
                    os.environ[key] = value
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())

    @staticmethod
    def get_dict_by_test(testname):
        """Return the testcases.yaml dict describing testname, else None."""
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'functest', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        LOGGER.error('Project %s is not defined in testcases.yaml', testname)
        return None

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml.

        Returns None (after logging) when the testcase is unknown, has
        no 'run' block, or the yaml lookup raises.
        """
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case.

        Instantiates the class named in the testcase's 'run' block,
        runs it (with optional 'args'), optionally pushes the result to
        the database and cleans resources.

        Raises:
            TestNotEnabled: if the testcase is disabled in testcases.yaml.
            Exception: if no 'run' block allows resolving the class.

        Returns:
            a TestCase status (EX_OK, EX_TESTCASE_FAILED or EX_RUN_ERROR)
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                try:
                    # 'args' is optional in the run block; absence simply
                    # means run() takes no extra keyword arguments.
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    # Third-party projects report their own criteria;
                    # running to completion counts as success here.
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run all the enabled testcases of one tier, in order.

        Raises:
            BlockingTestFailed: when a blocking testcase fails, to abort
                the remaining tiers.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    # Only functest's own testcases gate the overall result.
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run every tier whose ci_loop matches the current CI_LOOP."""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            ci_loop = env.get('CI_LOOP')
            if (tier.get_tests() and
                    re.search(ci_loop, tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner.

        Args (all optional, as produced by RunTestsParser):
            test: testcase name, tier name or "all" (None means all)
            noclean: skip resource cleaning after each testcase
            report: push results to the database

        Returns:
            a Result member (EX_OK or EX_ERROR)
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            # kwargs.get() handles both a missing key and argparse's
            # default (test=None): as documented in the parser help,
            # omitting -t/--test must run everything. The previous
            # `'test' in kwargs` check wrongly sent test=None to the
            # "Unknown test case" branch.
            if kwargs.get('test'):
                LOGGER.debug("Test args: %s", kwargs['test'])
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # overall_result was already set by run_tier().
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # Print the summary report unless a single testcase was requested.
        # kwargs.get() avoids the KeyError the former kwargs['test'] raised
        # when main() was called programmatically without a 'test' key.
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """To generate functest report showing the overall results.

        Covers one tier when given, else every tier; testcases never
        executed (or explicitly skipped) are reported as SKIP.
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if(test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
292
293
def main():
    """Configure logging, parse the command line and delegate to Runner.

    Returns the integer value of the overall Result for use as an exit
    code.
    """
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    cmdline = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**cmdline).value