Merge "Install vEPC dependencies in Dockerfile"
[functest.git] / functest/ci/run_tests.py
#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

""" The entry point for running tests:
1) Parses functest/ci/testcases.yaml to check which testcase(s) to run
2) Executes the common operations on every testcase (run, push results to db...)
3) Returns the right status code
"""
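
# Typical command-line usage (illustrative; 'run_tests' is assumed to be the
# console script wired to main() below, and test/tier names come from
# functest/ci/testcases.yaml):
#   run_tests -t all            # run every tier enabled for this CI loop
#   run_tests -t healthcheck    # run a single tier
#   run_tests -t vping_ssh -r   # run one test case and push results to the db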

import argparse
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap
import pkg_resources

import enum
import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
LOGGER = logging.getLogger('functest.ci.run_tests')

CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')


class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1
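# Illustrative mapping: Result.EX_OK.value equals os.EX_OK (0 on POSIX), so a
# fully successful run exits with 0 and any failure with Result.EX_ERROR (-1).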


class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
    pass


class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
    pass


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. All the tests are run "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))

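# Illustrative parse_args() result (keys mirror the options above):
#   RunTestsParser().parse_args(['-t', 'healthcheck', '-r'])
#   => {'test': 'healthcheck', 'noclean': False, 'report': True}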

class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
        try:
            dic_testcase = ft_utils.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

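    # In testcases.yaml, a 'run' block typically looks like this (the module
    # and class names below are illustrative):
    #   run:
    #       module: 'functest.opnfv_tests.openstack.vping.vping_ssh'
    #       class: 'VPingSSH'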
    def run_test(self, test):
        """Run one test case"""
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                # run() kwargs are optional; get() avoids masking a KeyError
                # raised inside run() itself
                kwargs = run_dict.get('args', {})
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot get the 'run' block of the test case.")
        return result

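    # Note: for functest-owned test cases the result above is is_successful();
    # for cases owned by other projects any completed run is mapped to EX_OK.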
    def run_tier(self, tier):
        """Run one tier"""
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all available testcases"""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join(
                                 [str(x.get_name()) for x in
                                  tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner"""
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if 'test' in kwargs:
                LOGGER.debug("Sourcing the credential file...")
                os_utils.source_credentials(CONST.__getattribute__('env_file'))

                LOGGER.debug("Test args: %s", kwargs['test'])
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass  # the blocking failure was already logged in run_tier()
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # kwargs.get avoids a KeyError when main() is called without 'test'
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

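    # Illustrative programmatic use, mirroring main() at module level below:
    #   runner = Runner()
    #   result = runner.main(test='all', noclean=False, report=False)
    #   sys.exit(result.value)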
    def summary(self, tier=None):
        """Generate the functest report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = ('PASS' if test_case.is_successful() ==
                              test_case.EX_OK else 'FAIL')
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)


def main():
    """Entry point"""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
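

# A minimal sketch, not part of the original console-script flow (assumption:
# direct execution is also desired): lets the module run standalone via
# 'python -m functest.ci.run_tests ...'.
if __name__ == '__main__':
    sys.exit(main())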