functest/ci/run_tests.py
#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """

import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap
import pkg_resources

import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
LOGGER = logging.getLogger('functest.ci.run_tests')

CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')


class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
    pass


class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
    pass


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))


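# Illustrative note on RunTestsParser (added for documentation): parse_args()
# returns a plain dict, so a call such as
#
#   RunTestsParser().parse_args(['-t', 'healthcheck', '-r'])
#
# would yield something like {'test': 'healthcheck', 'noclean': False,
# 'report': True}, which is what Runner.main(**kwargs) consumes below.

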
class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_rc_file():
        """Set the environment variables from openstack.creds"""

        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        LOGGER.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
        try:
            dic_testcase = ft_utils.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

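    # For reference, the 'run' block in testcases.yaml is expected to name the
    # module and class to load, plus optional keyword arguments. A minimal
    # sketch (module/class/args values below are illustrative, not taken from
    # the actual file) could look like:
    #
    #   run:
    #       module: 'functest.opnfv_tests.openstack.vping.vping_ssh'
    #       class: 'VPingSSH'
    #       args:
    #           some_option: 'some_value'
    #
    # run_test() below imports 'module', instantiates 'class' with the whole
    # testcase entry, and forwards 'args' to run() when present.
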
    def run_test(self, test):
        """Run one test case"""
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                try:
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

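    # The contract implied by run_test(): the class named in the 'run' block
    # is instantiated with the testcases.yaml entry as keyword arguments and
    # must provide run() (optionally accepting the 'args' mapping),
    # is_successful(), push_to_db() and clean(), i.e. the interface of
    # functest.core.testcase.TestCase. A hypothetical skeleton (illustration
    # only, not an actual functest case) might be:
    #
    #   class MyCheck(testcase.TestCase):
    #       def run(self, **kwargs):
    #           # perform the check, fill in self.result, then report status
    #           return testcase.TestCase.EX_OK
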
    def run_tier(self, tier):
        """Run one tier"""
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all available testcases"""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(
                                 ' '.join([str(x.get_name())
                                           for x in tier.get_tests()]),
                                 width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner"""
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if 'test' in kwargs:
                self.source_rc_file()
                LOGGER.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self._tiers.get_test(kwargs.get('test')):
            self.summary(self._tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

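    # Programmatic use of main() (a sketch mirroring the CLI entry point at
    # the bottom of this file): the keyword names match those produced by
    # RunTestsParser, e.g.
    #
    #   status = Runner().main(test='all', noclean=False, report=False)
    #   # status is a Result member, either Result.EX_OK or Result.EX_ERROR
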
    def summary(self, tier=None):
        """Generate the functest report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if (test_case.is_successful()
                                        == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)


def main():
    """Entry point"""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
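
# When executed directly as a script (an assumption; the guard is not shown in
# this snippet), the conventional wiring would be:
#
#   if __name__ == '__main__':
#       sys.exit(main())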