Merge "Inject all envs in openrc into CONST"
[functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2016 Ericsson AB and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
""" The entry point for running tests:
1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
2) Executes the common operations on every testcase (run, push results to db...)
3) Returns the right status code
"""
15
16 import argparse
17 import enum
18 import importlib
19 import logging
20 import logging.config
21 import os
22 import re
23 import sys
24 import textwrap
25 import pkg_resources
26
27 import prettytable
28
29 import functest.ci.tier_builder as tb
30 import functest.core.testcase as testcase
31 import functest.utils.functest_utils as ft_utils
32 import functest.utils.openstack_utils as os_utils
33 from functest.utils.constants import CONST
34
35 # __name__ cannot be used here
36 LOGGER = logging.getLogger('functest.ci.run_tests')
37
38 CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
39     'functest', 'ci/config_functest.yaml')
40
41
class Result(enum.Enum):
    """Overall execution result expressed as an enumerated type."""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK  # every selected testcase succeeded
    EX_ERROR = -1     # at least one functest testcase failed
47
48
class BlockingTestFailed(Exception):
    """Exception raised when a blocking testcase fails."""
    # The docstring is a sufficient class body; the former trailing `pass`
    # was redundant (pylint W0107 unnecessary-pass).
52
53
class TestNotEnabled(Exception):
    """Exception raised when the selected testcase is not enabled."""
    # The docstring is a sufficient class body; the former trailing `pass`
    # was redundant (pylint W0107 unnecessary-pass).
57
58
class RunTestsParser(object):
    """Command line parser for selecting and running tests."""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        # Build the argparse parser once; parse_args() reuses it.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            "-t", "--test", dest="test", action='store',
            help=("Test case or tier (group of tests) to be executed. "
                  "It will run all the test if not specified."))
        self.parser.add_argument(
            "-n", "--noclean", action="store_true",
            help=("Do not clean OpenStack resources after running each "
                  "test (default=false)."))
        self.parser.add_argument(
            "-r", "--report", action="store_true",
            help="Push results to database (default=false).")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        parsed = self.parser.parse_args(argv)
        return vars(parsed)
86
87
class Runner(object):
    """Run the selected testcases/tiers and track their results.

    Tiers and testcases are resolved through TierBuilder for the current
    installer and scenario; every executed testcase instance is kept in
    ``executed_test_cases`` so run_tier() and summary() can read back its
    final status.
    """

    def __init__(self):
        # testcase name -> executed TestCase instance (filled by run_test()).
        self.executed_test_cases = {}
        # Overall status; set to Result.EX_ERROR when a functest case fails.
        self.overall_result = Result.EX_OK
        # When True, snapshot resources before and clean them after each test.
        self.clean_flag = True
        # When True, push each testcase result to the results database.
        self.report_flag = False
        # Tiers/testcases applicable to the current installer and scenario.
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_rc_file():
        """Set the environment variables from the OpenStack RC file.

        Raises:
            Exception: when the RC file pointed to by
                CONST.openstack_creds does not exist.
        """

        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        LOGGER.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml.

        Returns:
            the 'run' dict (module/class and optional args) of the
            testcase, or None when it is missing or cannot be read.
        """
        try:
            dic_testcase = ft_utils.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            # Any lookup/parsing failure is logged and mapped to None so
            # the caller can raise one meaningful error instead.
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case.

        Imports the class named in the testcase's 'run' block, instantiates
        it with the testcase config, runs it (with the optional 'args'),
        and honors clean_flag/report_flag for snapshot, clean and push.

        Returns:
            TestCase.EX_OK on success (always for non-functest projects),
            otherwise a TestCase error status.

        Raises:
            TestNotEnabled: when the testcase is disabled in testcases.yaml.
            Exception: when no 'run' block can be read for the testcase.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    # Abort early if the resource snapshot cannot be taken.
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                try:
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    # No 'args' key in the run block: call run() bare.
                    # NOTE(review): a KeyError raised inside run(**kwargs)
                    # would also land here and re-run the test — confirm.
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    # Non-functest projects never gate the return value.
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run all the supported testcases of one tier.

        Returns:
            self.overall_result (Result.EX_OK or Result.EX_ERROR).

        Raises:
            BlockingTestFailed: when a failed testcase is flagged blocking.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                # Read the executed instance back to evaluate its status.
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    # Only functest's own testcases affect the exit status.
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all the tiers matching the current CI loop."""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            # Keep non-empty tiers whose ci_loop matches CONST.CI_LOOP.
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner.

        Args:
            **kwargs: parsed cmdline options; 'test' selects a tier, a
                single testcase or "all"; 'noclean' and 'report' toggle
                clean_flag and report_flag.

        Returns:
            Result.EX_OK or Result.EX_ERROR.
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            # NOTE(review): argparse always provides the 'test' key (with
            # value None when -t is omitted), so this branch looks always
            # taken and the else below unreachable via CLI — confirm.
            if 'test' in kwargs:
                self.source_rc_file()
                LOGGER.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # Stop running further tests; overall_result was already set.
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # Print the summary unless a single testcase was selected.
        if not self._tiers.get_test(kwargs['test']):
            self.summary(self._tiers.get_tier(kwargs['test']))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """Generate the functest report showing the overall results.

        Args:
            tier: limit the report to one tier; None means all tiers.
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # Never executed (e.g. run aborted by a blocking
                    # failure earlier in the tier): report as SKIP.
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if(test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
282
283
def main():
    """Entry point: configure logging, parse the cmdline and run."""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    args = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**args).value