b95e1008b42a87401738b96a7bffb8b867e04283
[functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2 #
3 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import argparse
12 import enum
13 import importlib
14 import logging
15 import logging.config
16 import os
17 import pkg_resources
18 import re
19 import sys
20
21 import prettytable
22
23 import functest.ci.tier_builder as tb
24 import functest.core.testcase as testcase
25 import functest.utils.functest_utils as ft_utils
26 import functest.utils.openstack_utils as os_utils
27 from functest.utils.constants import CONST
28
29 # __name__ cannot be used here
30 logger = logging.getLogger('functest.ci.run_tests')
31
32
class Result(enum.Enum):
    """Overall exit status reported by Runner.main()."""
    EX_OK = os.EX_OK  # POSIX "successful termination" code (0)
    EX_ERROR = -1     # generic failure; no finer-grained codes are used
36
37
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails, aborting the run."""
    pass
40
41
class TestNotEnabled(Exception):
    """Raised when a selected test case is disabled in the configuration."""
    pass
44
45
class RunTestsParser(object):
    """Command-line argument parser for the Functest test runner."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse the given argument list and return the options as a dict.

        Args:
            argv: list of argument strings (e.g. sys.argv[1:]). Defaults to
                an empty list, i.e. no arguments.

        Returns:
            dict mapping option names ('test', 'noclean', 'report') to their
            parsed values.
        """
        # A mutable default ([]) was used here before; None is the safe
        # sentinel and is mapped back to [] to keep the old behavior of
        # parsing no arguments when called without argv.
        return vars(self.parser.parse_args([] if argv is None else argv))
64
65
class Runner(object):
    """Drives the execution of test cases and tiers.

    Keeps track of every instantiated test case (for the final report) and
    of an overall Result value which main() returns to the caller.
    """

    def __init__(self):
        # Test case objects executed so far, used for the summary table.
        self.executed_test_cases = []
        self.overall_result = Result.EX_OK
        # Snapshot/cleanup of OpenStack resources is enabled by default.
        self.clean_flag = True
        # Pushing results to the test database is disabled by default.
        self.report_flag = False

    @staticmethod
    def print_separator(str, count=45):
        """Log a separator made of ``count - 1`` repetitions of ``str``.

        The parameter name shadows the builtin str; it is kept unchanged
        for backward compatibility with any keyword caller.
        """
        # String multiplication replaces the former manual loop; the
        # count - 1 repetition count matches the original behavior.
        logger.info("%s", str * (count - 1))

    @staticmethod
    def source_rc_file():
        """Source the OpenStack RC file and mirror OS_* values into CONST.

        Raises:
            Exception: if the RC file referenced by CONST does not exist.
        """
        rc_file = getattr(CONST, 'openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        # items() replaces the Python-2-only iteritems() so this works on
        # both Python 2 and Python 3.
        for key, value in os.environ.items():
            if re.search("OS_", key):
                if key == 'OS_AUTH_URL':
                    setattr(CONST, 'OS_AUTH_URL', value)
                elif key == 'OS_USERNAME':
                    setattr(CONST, 'OS_USERNAME', value)
                elif key == 'OS_TENANT_NAME':
                    setattr(CONST, 'OS_TENANT_NAME', value)
                elif key == 'OS_PASSWORD':
                    setattr(CONST, 'OS_PASSWORD', value)

    @staticmethod
    def get_run_dict(testname):
        """Return the 'run' section of testname's configuration.

        Returns:
            The 'run' sub-dict if present, otherwise None (including on
            any lookup error, which is logged).
        """
        try:
            # Renamed from 'dict' to stop shadowing the builtin.
            test_dict = ft_utils.get_dict_by_test(testname)
            if not test_dict:
                logger.error("Cannot get %s's config options", testname)
            elif 'run' in test_dict:
                return test_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test, tier_name, testcases=None):
        """Run a single test case and return its EX_* status code.

        Args:
            test: test case descriptor from the tier builder.
            tier_name: name of the enclosing tier (unused; kept for
                interface compatibility).
            testcases: unused; kept for interface compatibility.

        Returns:
            A testcase.TestCase EX_* status code.

        Raises:
            TestNotEnabled: if the test is disabled in the configuration.
            Exception: if no runnable module/class is configured for it.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("\n")  # blank line
        self.print_separator("=")
        logger.info("Running test case '%s'...", test.get_name())
        self.print_separator("=")
        logger.debug("\n%s", test)
        self.source_rc_file()

        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases.append(test_case)
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return result
                # Explicit membership test instead of the former
                # try/except KeyError, which silently re-ran the test
                # whenever run(**kwargs) itself raised a KeyError.
                if 'args' in run_dict:
                    result = test_case.run(**run_dict['args'])
                else:
                    result = test_case.run()
                if result == testcase.TestCase.EX_OK:
                    if self.report_flag:
                        test_case.push_to_db()
                    result = test_case.is_successful()
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception(
                    "Cannot import module %s", run_dict['module'])
            except AttributeError:
                logger.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")

        return result

    def run_tier(self, tier):
        """Run every test case of a tier.

        Returns:
            0 when the tier holds no runnable tests, otherwise None.

        Raises:
            BlockingTestFailed: when a failing test is marked as blocking.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            return 0
        logger.info("\n\n")  # blank line
        self.print_separator("#")
        logger.info("Running tier '%s'", tier_name)
        self.print_separator("#")
        logger.debug("\n%s", tier)
        for test in tests:
            result = self.run_test(test, tier_name)
            if result != testcase.TestCase.EX_OK:
                logger.error("The test case '%s' failed.", test.get_name())
                self.overall_result = Result.EX_ERROR
                if test.is_blocking():
                    raise BlockingTestFailed(
                        "The test case {} failed and is blocking".format(
                            test.get_name()))

    def run_all(self, tiers):
        """Run every tier whose CI loop matches the configured CI_LOOP."""
        summary = ""
        tiers_to_run = []

        for tier in tiers.get_tiers():
            # Only non-empty tiers whose declared CI loop matches the
            # configured one are scheduled.
            if (len(tier.get_tests()) != 0 and
                    re.search(getattr(CONST, 'CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                summary += ("\n    - %s:\n\t   %s"
                            % (tier.get_name(),
                               tier.get_test_names()))

        logger.info("Tests to be executed:%s", summary)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Run the requested test/tier (or everything) and report.

        Recognized kwargs: 'test' (name, tier name or "all"), 'noclean'
        (bool), 'report' (bool).

        Returns:
            Result.EX_OK on success, Result.EX_ERROR otherwise.
        """
        _tiers = tb.TierBuilder(
            getattr(CONST, 'INSTALLER_TYPE'),
            getattr(CONST, 'DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

        if kwargs['noclean']:
            self.clean_flag = False

        if kwargs['report']:
            self.report_flag = True

        try:
            if kwargs['test']:
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if _tiers.get_tier(kwargs['test']):
                    self.run_tier(_tiers.get_tier(kwargs['test']))
                elif _tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        _tiers.get_test(kwargs['test']),
                        _tiers.get_tier_name(kwargs['test']),
                        kwargs['test'])
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all(_tiers)
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'.",
                                 kwargs['test'],
                                 getattr(CONST, 'DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s",
                                 _tiers)
                    return Result.EX_ERROR
            else:
                self.run_all(_tiers)
        except BlockingTestFailed:
            # Already logged by run_tier; overall_result is set there.
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR

        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, getattr(CONST, env_var)])
        logger.info("Deployment description: \n\n%s\n", msg)

        # NOTE(review): the report table is only printed when MORE than one
        # test case ran; a single executed test gets no table. Kept as-is
        # to preserve behavior — confirm whether '>= 1' was intended.
        if len(self.executed_test_cases) > 1:
            msg = prettytable.PrettyTable(
                header_style='upper', padding_width=5,
                field_names=['test case', 'project', 'tier',
                             'duration', 'result'])
            for test_case in self.executed_test_cases:
                result = ('PASS' if test_case.is_successful() ==
                          test_case.EX_OK else 'FAIL')
                msg.add_row([test_case.case_name, test_case.project_name,
                             _tiers.get_tier_name(test_case.case_name),
                             test_case.get_duration(), result])
            logger.info("FUNCTEST REPORT: \n\n%s\n", msg)

        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
270
271
def main():
    """Script entry point: set up logging, parse CLI args, run the tests.

    Returns the integer value of the Runner's overall Result.
    """
    ini_path = pkg_resources.resource_filename('functest', 'ci/logging.ini')
    logging.config.fileConfig(ini_path)
    options = RunTestsParser().parse_args(sys.argv[1:])
    return Runner().main(**options).value