#!/usr/bin/python -u
#
# Author: Jose Lausuch (jose.lausuch@ericsson.com)
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#

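"""Run the Functest test cases and tiers of a deployment.

This module defines the ``Runner`` class used by the Functest CI: it sources
the OpenStack credentials, builds the list of tiers and test cases with
TierBuilder, runs them and returns an overall ``Result`` exit value.
"""
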
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys

import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')


class Result(enum.Enum):
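    """Exit codes returned by Runner.main() and passed to sys.exit()."""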
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
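    """Raised when a test case flagged as blocking fails."""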
    pass


class TestNotEnabled(Exception):
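    """Raised when a disabled test case is asked to run."""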
    pass


class RunTestsParser(object):
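    """Parse the command line: -t/--test, -n/--noclean and -r/--report."""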

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        # default to an empty argument list (avoids a mutable default value)
        return vars(self.parser.parse_args(argv if argv is not None else []))


class Runner(object):
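    """Run the test cases or tiers and keep track of the overall result."""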

    def __init__(self):
        self.executed_test_cases = []
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False

    @staticmethod
    def print_separator(char, count=45):
        # Log a separator line made of `count` repetitions of `char`.
        logger.info(char * count)

    @staticmethod
    def source_rc_file():
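        """Source the OpenStack RC file and copy OS_* values into CONST."""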
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        for key, value in os.environ.items():
            if re.search("OS_", key):
                if key == 'OS_AUTH_URL':
                    CONST.__setattr__('OS_AUTH_URL', value)
                elif key == 'OS_USERNAME':
                    CONST.__setattr__('OS_USERNAME', value)
                elif key == 'OS_TENANT_NAME':
                    CONST.__setattr__('OS_TENANT_NAME', value)
                elif key == 'OS_PASSWORD':
                    CONST.__setattr__('OS_PASSWORD', value)

    @staticmethod
    def generate_os_snapshot():
        os_snapshot.main()

    @staticmethod
    def cleanup():
        os_clean.main()

    @staticmethod
    def get_run_dict(testname):
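        """Return the 'run' block of the test definition, or None on error."""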
        try:
            run_dict = ft_utils.get_dict_by_test(testname)
            if not run_dict:
                logger.error("Cannot get {}'s config options".format(testname))
            elif 'run' in run_dict:
                return run_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get {}'s config options".format(testname))
            return None

    def run_test(self, test, tier_name, testcases=None):
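        """Run a single test case and record its result.

        The OpenStack RC file is sourced first; when the test requires it and
        cleaning is enabled, a snapshot of the OpenStack resources is taken
        before the run and the resources are cleaned afterwards. A failing
        blocking test raises BlockingTestFailed.
        """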
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        test_name = test.get_name()
        logger.info("\n")  # blank line
        self.print_separator("=")
        logger.info("Running test case '%s'..." % test_name)
        self.print_separator("=")
        logger.debug("\n%s" % test)
        self.source_rc_file()

        if test.needs_clean() and self.clean_flag:
            self.generate_os_snapshot()

        flags = (" -t %s" % (test_name))
        if self.report_flag:
            flags += " -r"

        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test_name)
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test_name)
                test_case = cls(**test_dict)
                self.executed_test_cases.append(test_case)
                try:
                    kwargs = run_dict['args']
                    result = test_case.run(**kwargs)
                except KeyError:
                    result = test_case.run()
                if result == testcase.TestCase.EX_OK:
                    if self.report_flag:
                        test_case.push_to_db()
                    result = test_case.is_successful()
                logger.info("Test result:\n\n%s\n", test_case)
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception("Cannot get the run dictionary of the test case.")

        if test.needs_clean() and self.clean_flag:
            self.cleanup()
        if result != testcase.TestCase.EX_OK:
            logger.error("The test case '%s' failed." % test_name)
            self.overall_result = Result.EX_ERROR
            if test.is_blocking():
                raise BlockingTestFailed(
                    "The test case {} failed and is blocking".format(
                        test.get_name()))

    def run_tier(self, tier):
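        """Run all the test cases of the given tier."""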
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if tests is None or len(tests) == 0:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            return 0
        logger.info("\n\n")  # blank line
        self.print_separator("#")
        logger.info("Running tier '%s'" % tier_name)
        self.print_separator("#")
        logger.debug("\n%s" % tier)
        for test in tests:
            self.run_test(test, tier_name)

    def run_all(self, tiers):
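        """Run every non-empty tier whose CI loop matches the CI_LOOP setting."""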
        summary = ""
        tiers_to_run = []

        for tier in tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                summary += ("\n    - %s:\n\t   %s"
                            % (tier.get_name(),
                               tier.get_test_names()))

        logger.info("Tests to be executed:%s" % summary)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
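        """Run the requested test case, tier or the whole suite.

        The expected keyword arguments are 'test', 'noclean' and 'report', as
        produced by RunTestsParser. A summary of the deployment and of the
        executed test cases is logged and a Result value is returned.
        """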
        _tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            CONST.__getattribute__("functest_testcases_yaml"))

        if kwargs['noclean']:
            self.clean_flag = False

        if kwargs['report']:
            self.report_flag = True

        try:
            if kwargs['test']:
                self.source_rc_file()
                logger.debug(kwargs['test'])
                if _tiers.get_tier(kwargs['test']):
                    self.run_tier(_tiers.get_tier(kwargs['test']))
                elif _tiers.get_test(kwargs['test']):
                    self.run_test(_tiers.get_test(kwargs['test']),
                                  _tiers.get_tier_name(kwargs['test']),
                                  kwargs['test'])
                elif kwargs['test'] == "all":
                    self.run_all(_tiers)
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'."
                                 % (kwargs['test'],
                                    CONST.__getattribute__('DEPLOY_SCENARIO')))
                    logger.debug("Available tiers are:\n\n%s",
                                 _tiers)
                    return Result.EX_ERROR
            else:
                self.run_all(_tiers)
        except Exception:
            logger.exception("Runner failed")
            self.overall_result = Result.EX_ERROR

        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description: \n\n%s\n", msg)

        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier', 'duration', 'result'])
        for test_case in self.executed_test_cases:
            result = ('PASS' if test_case.is_successful() == test_case.EX_OK
                      else 'FAIL')
            msg.add_row([test_case.case_name, test_case.project_name,
                         _tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
        logger.info("FUNCTEST REPORT: \n\n%s\n", msg)

        logger.info("Execution exit value: %s" % self.overall_result)
        return self.overall_result


if __name__ == '__main__':
    logging.config.fileConfig(
        CONST.__getattribute__('dir_functest_logging_cfg'))
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    sys.exit(runner.main(**args).value)
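
# Illustrative CLI usage (a sketch: 'healthcheck' and 'vping_ssh' are example
# tier/test names that depend on the testcases.yaml shipped with functest):
#
#   python run_tests.py -t all            # run all the tiers of the scenario
#   python run_tests.py -t healthcheck    # run a single tier
#   python run_tests.py -t vping_ssh -r   # run one test case and push results
#   python run_tests.py -t all -n         # do not clean OpenStack resources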