Merge "Improve the pylint score of functest-core"
[functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2016 Ericsson AB and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """
15
16 import argparse
17 import enum
18 import importlib
19 import logging
20 import logging.config
21 import os
22 import re
23 import sys
24 import textwrap
25 import pkg_resources
26
27 import prettytable
28 import six
29
30 import functest.ci.tier_builder as tb
31 import functest.core.testcase as testcase
32 import functest.utils.functest_utils as ft_utils
33 import functest.utils.openstack_utils as os_utils
34 from functest.utils.constants import CONST
35
# __name__ cannot be used here: the logger name is hardcoded, presumably so
# the 'functest.ci.run_tests' logger is used even when the module runs as
# __main__ — TODO confirm against ci/logging.ini.
LOGGER = logging.getLogger('functest.ci.run_tests')

# Absolute path of the config_functest.yaml file shipped inside the
# installed functest package.
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
41
42
class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    # Success: mirrors the POSIX exit status (0) so callers can return
    # Result.<member>.value straight to the shell.
    EX_OK = os.EX_OK
    # Any failure: blocking test failed, unknown test/tier or unexpected error.
    EX_ERROR = -1
48
49
class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
    # The docstring is a sufficient class body; an explicit `pass` here is
    # redundant (pylint: unnecessary-pass) and has been removed.
53
54
class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
    # The docstring is a sufficient class body; an explicit `pass` here is
    # redundant (pylint: unnecessary-pass) and has been removed.
58
59
class RunTestsParser(object):
    """Command-line parser dedicated to running tests."""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            "-t", "--test", dest="test", action='store',
            help=("Test case or tier (group of tests) to be executed. "
                  "It will run all the test if not specified."))
        self.parser.add_argument(
            "-n", "--noclean", action="store_true",
            help=("Do not clean OpenStack resources after running each "
                  "test (default=false)."))
        self.parser.add_argument(
            "-r", "--report", action="store_true",
            help=("Push results to database (default=false)."))

    def parse_args(self, argv=None):
        """Convert argument strings into a dict of options.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))
87
88
class Runner(object):
    """Sequence the execution of the testcases and tiers and report results.

    Fix over the previous revision: main() read kwargs['test']
    unconditionally after the try block, raising KeyError when the key was
    missing, and treated test=None (argparse default when -t is not given)
    as an unknown test instead of running everything as the CLI help
    promises.  main() now uses kwargs.get('test') and a truthiness check.
    """

    def __init__(self):
        # Map test name -> instantiated TestCase object; filled by
        # run_test() and read back by run_tier() and summary().
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_rc_file():
        """Set the environmental vars from openstack.creds.

        Raises:
            Exception: if the RC file pointed to by CONST.openstack_creds
                does not exist.
        """
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        LOGGER.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        # Mirror the freshly-sourced OS_* environment variables into CONST
        # so the rest of the framework reads consistent credentials.
        for key, value in six.iteritems(os.environ):
            if re.search("OS_", key):
                if key == 'OS_AUTH_URL':
                    CONST.__setattr__('OS_AUTH_URL', value)
                elif key == 'OS_USERNAME':
                    CONST.__setattr__('OS_USERNAME', value)
                elif key == 'OS_TENANT_NAME':
                    CONST.__setattr__('OS_TENANT_NAME', value)
                elif key == 'OS_PASSWORD':
                    CONST.__setattr__('OS_PASSWORD', value)
                elif key == "OS_PROJECT_DOMAIN_NAME":
                    CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml.

        Returns:
            the 'run' dict of the given testcase, or None if it cannot be
            obtained (unknown test, missing 'run' key or any error).
        """
        try:
            dic_testcase = ft_utils.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case.

        Args:
            test: a TestCase description object (from tier_builder).

        Returns:
            a TestCase.EX_* status code.

        Raises:
            TestNotEnabled: if the testcase is disabled in testcases.yaml.
            Exception: if the 'run' block cannot be found for the testcase.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                # Instantiate the class declared in testcases.yaml with the
                # whole testcase dict as kwargs.
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                try:
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    # No 'args' block: run with defaults.
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                # Only functest's own testcases are graded; external
                # projects' results do not gate the run here.
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run one tier (all its supported test cases, in order).

        Returns:
            the updated overall Result.

        Raises:
            BlockingTestFailed: if a blocking testcase fails.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all the tiers matching the current CI loop."""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            # Keep only non-empty tiers whose ci_loop matches CI_LOOP.
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner.

        Args:
            kwargs: optional 'test' (testcase/tier name, "all" or None),
                'noclean' and 'report' booleans, as produced by
                RunTestsParser.parse_args().

        Returns:
            Result.EX_OK or Result.EX_ERROR.
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        # get() instead of ['test']: a missing key (or the argparse default
        # test=None when -t is not given) must run all testcases, not raise
        # KeyError below nor log "Unknown test case or tier 'None'".
        test_name = kwargs.get('test')
        try:
            # Credentials are needed whichever path is taken, including the
            # run-all default.
            self.source_rc_file()
            if test_name:
                LOGGER.debug("Test args: %s", test_name)
                if self._tiers.get_tier(test_name):
                    self.run_tier(self._tiers.get_tier(test_name))
                elif self._tiers.get_test(test_name):
                    result = self.run_test(self._tiers.get_test(test_name))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.", test_name)
                        self.overall_result = Result.EX_ERROR
                elif test_name == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 test_name,
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # Already logged by run_tier(); the summary below reports it.
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # A single testcase prints its own result; tiers and full runs get
        # the tabular summary.
        if not self._tiers.get_test(test_name):
            self.summary(self._tiers.get_tier(test_name))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """Log the functest report showing the overall results.

        Args:
            tier: restrict the report to this tier; None means all tiers.
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # Never instantiated (e.g. a blocking failure aborted
                    # the tier before reaching it): report as skipped.
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if(test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
295
296
def main():
    """Configure logging, parse the command line and launch the runner.

    Returns:
        the integer value of the overall Result (0 on success).
    """
    log_config = pkg_resources.resource_filename('functest', 'ci/logging.ini')
    logging.config.fileConfig(log_config)
    logging.captureWarnings(True)
    args = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**args).value