Merge "Install releng via requirements.txt"
functest/ci/run_tests.py
#!/usr/bin/python -u
#
# Author: Jose Lausuch (jose.lausuch@ericsson.com)
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
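"""Entry point of the Functest test runs.

Builds the tiers and test cases defined in the testcases.yaml
configuration, runs the requested tier or test case (or all of them),
optionally snapshots and cleans up OpenStack resources around each test,
and logs a summary report of the executed test cases.

Typical invocations (the 'healthcheck' tier name is only an example):

    run_tests.py -t all
    run_tests.py -t healthcheck -r
"""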
import argparse
import enum
import importlib
import logging
import logging.config
import os
import re
import sys

import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')


class Result(enum.Enum):
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    pass


class TestNotEnabled(Exception):
    pass


class RunTestsParser(object):
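    """Parser of the command-line arguments of run_tests."""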

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=[]):
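        """Parse the given argument list and return the result as a dict."""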
        return vars(self.parser.parse_args(argv))


class Runner(object):
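    """Runner of the Functest tiers and test cases."""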

    def __init__(self):
        self.executed_test_cases = []
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False

    @staticmethod
    def print_separator(char, count=45):
        # Log a separator line made of `count` repetitions of `char`.
        logger.info(char * count)

    @staticmethod
    def source_rc_file():
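        """Source the OpenStack RC file and mirror the OS_* credentials
        into CONST."""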
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        for key, value in os.environ.items():
            if key in ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME',
                       'OS_PASSWORD'):
                CONST.__setattr__(key, value)

    @staticmethod
    def generate_os_snapshot():
        os_snapshot.main()

    @staticmethod
    def cleanup():
        os_clean.main()

    @staticmethod
    def get_run_dict(testname):
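        """Return the 'run' configuration of the given test case, or None
        if the configuration cannot be retrieved."""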
        try:
            test_dict = ft_utils.get_dict_by_test(testname)
            if not test_dict:
                logger.error("Cannot get {}'s config options".format(testname))
            elif 'run' in test_dict:
                return test_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get {}'s config options".format(testname))
            return None

    def run_test(self, test, tier_name, testcases=None):
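        """Run a single test case.

        Sources the OpenStack credentials, optionally snapshots the
        OpenStack resources, instantiates the class named in the test
        case 'run' configuration and runs it, optionally pushes the
        result to the test database, and cleans up afterwards. Raises
        BlockingTestFailed if a blocking test case fails.
        """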
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("\n")  # blank line
        self.print_separator("=")
        logger.info("Running test case '%s'..." % test.get_name())
        self.print_separator("=")
        logger.debug("\n%s" % test)
        self.source_rc_file()

        if test.needs_clean() and self.clean_flag:
            self.generate_os_snapshot()

        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases.append(test_case)
                try:
                    kwargs = run_dict['args']
                    result = test_case.run(**kwargs)
                except KeyError:
                    result = test_case.run()
                if result == testcase.TestCase.EX_OK:
                    if self.report_flag:
                        test_case.push_to_db()
                    result = test_case.is_successful()
                logger.info("Test result:\n\n%s\n", test_case)
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception("Cannot get the 'run' configuration of the "
                            "test case {}".format(test.get_name()))

        if test.needs_clean() and self.clean_flag:
            self.cleanup()
        if result != testcase.TestCase.EX_OK:
            logger.error("The test case '%s' failed." % test.get_name())
            self.overall_result = Result.EX_ERROR
            if test.is_blocking():
                raise BlockingTestFailed(
                    "The test case {} failed and is blocking".format(
                        test.get_name()))

    def run_tier(self, tier):
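        """Run all the test cases of the given tier."""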
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if tests is None or len(tests) == 0:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            return 0
        logger.info("\n\n")  # blank line
        self.print_separator("#")
        logger.info("Running tier '%s'" % tier_name)
        self.print_separator("#")
        logger.debug("\n%s" % tier)
        for test in tests:
            self.run_test(test, tier_name)

    def run_all(self, tiers):
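        """Run every tier that has test cases and matches the configured
        CI loop."""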
        summary = ""
        tiers_to_run = []

        for tier in tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                summary += ("\n    - %s:\n\t   %s"
                            % (tier.get_name(),
                               tier.get_test_names()))

        logger.info("Tests to be executed:%s" % summary)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
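        """Entry point of the runner.

        Builds the tiers from the installer type, deploy scenario and
        testcases.yaml configuration, runs the requested test case or
        tier (or all of them), then logs the deployment description and
        the report of the executed test cases. Returns Result.EX_OK or
        Result.EX_ERROR.
        """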
        _tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            CONST.__getattribute__("functest_testcases_yaml"))

        if kwargs['noclean']:
            self.clean_flag = False

        if kwargs['report']:
            self.report_flag = True

        try:
            if kwargs['test']:
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if _tiers.get_tier(kwargs['test']):
                    self.run_tier(_tiers.get_tier(kwargs['test']))
                elif _tiers.get_test(kwargs['test']):
                    self.run_test(_tiers.get_test(kwargs['test']),
                                  _tiers.get_tier_name(kwargs['test']),
                                  kwargs['test'])
                elif kwargs['test'] == "all":
                    self.run_all(_tiers)
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'."
                                 % (kwargs['test'],
                                    CONST.__getattribute__('DEPLOY_SCENARIO')))
                    logger.debug("Available tiers are:\n\n%s",
                                 _tiers)
                    return Result.EX_ERROR
            else:
                self.run_all(_tiers)
        except BlockingTestFailed:
            # A blocking test case failed: the run stops here and
            # overall_result has already been set to EX_ERROR by run_test().
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR

        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description: \n\n%s\n", msg)

        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier', 'duration', 'result'])
        for test_case in self.executed_test_cases:
            result = 'PASS' if (test_case.is_successful() ==
                                test_case.EX_OK) else 'FAIL'
            msg.add_row([test_case.case_name, test_case.project_name,
                         _tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
        logger.info("FUNCTEST REPORT: \n\n%s\n", msg)

        logger.info("Execution exit value: %s" % self.overall_result)
        return self.overall_result


if __name__ == '__main__':
    logging.config.fileConfig(
        CONST.__getattribute__('dir_functest_logging_cfg'))
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    sys.exit(runner.main(**args).value)