# Move source_credentials() into run_tests.py
# [functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2016 Ericsson AB and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """ The entry of running tests:
11 1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
12 2) Execute the common operations on every testcase (run, push results to db...)
13 3) Return the right status code
14 """
15
16 import argparse
17 import importlib
18 import logging
19 import logging.config
20 import os
21 import re
22 import sys
23 import textwrap
24 import pkg_resources
25
26 import enum
27 import prettytable
28
29 import functest.ci.tier_builder as tb
30 import functest.core.testcase as testcase
31 import functest.utils.functest_utils as ft_utils
32 from functest.utils.constants import CONST
33
34 # __name__ cannot be used here
35 LOGGER = logging.getLogger('functest.ci.run_tests')
36
37 CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
38     'functest', 'ci/config_functest.yaml')
39
40
class Result(enum.Enum):
    """Overall exit status of a test run, as an enumerated type."""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1
46
47
class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""
    # NOTE: the docstring is a sufficient class body; the former
    # trailing 'pass' was redundant (pylint: unnecessary-pass).
51
52
class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""
    # NOTE: the docstring is a sufficient class body; the former
    # trailing 'pass' was redundant (pylint: unnecessary-pass).
56
57
class RunTestsParser(object):
    """Command-line parser for running tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        register = self.parser.add_argument
        register("-t", "--test", dest="test", action='store',
                 help="Test case or tier (group of tests) "
                 "to be executed. It will run all the test "
                 "if not specified.")
        register("-n", "--noclean", action="store_true",
                 help="Do not clean "
                 "OpenStack resources after running each "
                 "test (default=false).")
        register("-r", "--report", action="store_true",
                 help="Push results to "
                 "database (default=false).")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        parsed = self.parser.parse_args(argv)
        return vars(parsed)
85
86
class Runner(object):
    """Runner class

    It runs one test case, one tier or all of them (sourcing the
    credentials first, optionally pushing results to the db and
    cleaning resources) and tracks the overall result.
    """

    def __init__(self):
        # executed TestCase objects indexed by test case name (for summary)
        self.executed_test_cases = {}
        # overall status; switched to EX_ERROR on any functest failure
        self.overall_result = Result.EX_OK
        # clean OpenStack resources after each test unless 'noclean' is set
        self.clean_flag = True
        # push results to the test db only when 'report' is set
        self.report_flag = False
        self.tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file):
        """Source the env file passed as arg

        Every 'export key=value' line is mirrored into both os.environ
        and CONST, with surrounding quotes and spaces stripped.

        Raises:
            IOError: if rc_file cannot be read
        """
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
                    os.environ[key] = value
                    setattr(CONST, key, value)

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml

        Returns:
            the 'run' dict on success.
            None if it is missing or cannot be read (errors are logged,
            never propagated).
        """
        try:
            dic_testcase = ft_utils.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case

        The test case class is imported dynamically from the 'run'
        block of testcases.yaml, run with its optional 'args',
        optionally pushed to the db and cleaned.

        Returns:
            the test case status for functest test cases,
            TestCase.EX_OK for other projects that ran to completion,
            TestCase.EX_RUN_ERROR on import/attribute errors.

        Raises:
            TestNotEnabled: if the test case is disabled
            Exception: if no 'run' block can be found for the test case
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                try:
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    # no 'args' block in testcases.yaml: run with defaults
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    # non-functest projects only have to run to completion
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run one tier

        Runs every test of the tier in sequence, downgrading
        overall_result on any functest test case failure.

        Returns:
            the updated overall result

        Raises:
            BlockingTestFailed: if a blocking test case fails
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all available testcases

        Only the non-empty tiers whose ci_loop matches the CI_LOOP
        env value are executed; the selection is logged first.
        """
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner

        Args:
            kwargs: 'test' (test case name, tier name, 'all' or None),
                'noclean' and 'report' booleans.

        Returns:
            Result.EX_OK or Result.EX_ERROR
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            # argparse always fills the 'test' key (None when -t is not
            # given). Testing its value instead of its mere presence
            # implements the documented behavior: run all the tests when
            # no test is specified.
            if kwargs.get('test'):
                LOGGER.debug("Sourcing the credential file...")
                self.source_envfile(getattr(CONST, 'env_file'))

                LOGGER.debug("Test args: %s", kwargs['test'])
                if self.tiers.get_tier(kwargs['test']):
                    self.run_tier(self.tiers.get_tier(kwargs['test']))
                elif self.tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self.tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # the blocking failure was already logged; stop gracefully
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        # kwargs.get() (instead of kwargs['test']) prevents a KeyError
        # when main() is called programmatically without a 'test' arg.
        if not self.tiers.get_test(kwargs.get('test')):
            self.summary(self.tiers.get_tier(kwargs.get('test')))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """To generate functest report showing the overall results

        Args:
            tier: restrict the report to this tier (all tiers if None)
        """
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        LOGGER.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # never executed (e.g. a previous blocking failure
                    # stopped the tier before this test was reached)
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if(test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
286
287
def main():
    """Entry point: configure logging, parse the CLI and run the tests."""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    cmdline = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**cmdline).value