Merge "Enable tempest offline by use_custom_images=True"
[functest.git] / functest / ci / run_tests.py
1 #!/usr/bin/env python
2 #
3 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import argparse
12 import enum
13 import importlib
14 import logging
15 import logging.config
16 import os
17 import re
18 import sys
19
20 import prettytable
21
22 import functest.ci.tier_builder as tb
23 import functest.core.testcase as testcase
24 import functest.utils.functest_utils as ft_utils
25 import functest.utils.openstack_utils as os_utils
26 from functest.utils.constants import CONST
27
# __name__ cannot be used here: when this module is executed as a script
# (see the __main__ guard below) __name__ would be '__main__', so the
# logger is named explicitly after its package path.
logger = logging.getLogger('functest.ci.run_tests')
30
31
class Result(enum.Enum):
    """Overall process exit statuses returned by Runner.main()."""
    EX_OK = os.EX_OK  # successful termination (0 on POSIX)
    EX_ERROR = -1     # at least one test case or tier failed
36
class BlockingTestFailed(Exception):
    """Raised when a test case marked as blocking fails (stops the tier run)."""
    pass
40
class TestNotEnabled(Exception):
    """Raised when a disabled test case is requested to run."""
    pass
44
class RunTestsParser(object):
    """Command-line parser for the Functest test runner."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=()):
        """Parse a sequence of CLI arguments and return them as a dict.

        The default is an empty *tuple* rather than the original mutable
        default ``[]`` (shared-mutable-default pitfall). An empty sequence
        is kept (instead of None) so that calling with no arguments still
        parses "no options" rather than falling back to sys.argv.

        :param argv: sequence of argument strings, e.g. sys.argv[1:]
        :return: dict with keys 'test', 'noclean' and 'report'
        """
        return vars(self.parser.parse_args(argv))
63
64
class Runner(object):
    """Run Functest test cases or tiers and track the overall result."""

    def __init__(self):
        # Test case objects instantiated so far (used for the final report).
        self.executed_test_cases = []
        # Overall exit status; downgraded to Result.EX_ERROR on any failure.
        self.overall_result = Result.EX_OK
        # Clean OpenStack resources after each test unless --noclean is set.
        self.clean_flag = True
        # Push results to the test database when --report is set.
        self.report_flag = False

    @staticmethod
    def print_separator(delimiter, count=45):
        """Log a separator line of (count - 1) delimiter characters.

        The parameter was renamed from 'str', which shadowed the builtin;
        callers pass it positionally so the interface is unchanged. The
        (count - 1) length preserves the original loop's behavior.
        """
        logger.info("%s", delimiter * (count - 1))

    @staticmethod
    def source_rc_file():
        """Source the OpenStack RC file and mirror its values into CONST.

        :raises Exception: if the RC file does not exist.
        """
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        # os.environ.items() replaces the Python 2-only iteritems() and
        # works on both Python 2 and 3. Only these four variables were
        # ever copied into CONST by the original if/elif chain.
        for key, value in os.environ.items():
            if key in ('OS_AUTH_URL', 'OS_USERNAME',
                       'OS_TENANT_NAME', 'OS_PASSWORD'):
                CONST.__setattr__(key, value)

    @staticmethod
    def get_run_dict(testname):
        """Return the 'run' section of a test case's config, or None.

        Returns None (after logging) when the config is missing, has no
        'run' section, or cannot be read at all.
        """
        try:
            # Renamed from 'dict', which shadowed the builtin type.
            test_dict = ft_utils.get_dict_by_test(testname)
            if not test_dict:
                logger.error("Cannot get {}'s config options".format(testname))
            elif 'run' in test_dict:
                return test_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get {}'s config options".format(testname))
            return None

    def run_test(self, test, tier_name, testcases=None):
        """Run a single test case.

        :param test: test case descriptor from the tier builder.
        :param tier_name: name of the enclosing tier (kept for interface
            compatibility; not used in the body).
        :param testcases: unused, kept for backward compatibility.
        :return: a testcase.TestCase result code (EX_RUN_ERROR on failure
            to set up or run the case).
        :raises TestNotEnabled: if the test case is disabled.
        :raises Exception: if the test case class cannot be resolved from
            its config.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("\n")  # blank line
        self.print_separator("=")
        logger.info("Running test case '%s'...", test.get_name())
        self.print_separator("=")
        logger.debug("\n%s", test)
        self.source_rc_file()

        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases.append(test_case)
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return result
                # Check for 'args' explicitly instead of catching KeyError
                # around run(**kwargs): the original except also swallowed
                # a KeyError raised *inside* run() and silently re-ran the
                # test without arguments.
                if 'args' in run_dict:
                    result = test_case.run(**run_dict['args'])
                else:
                    result = test_case.run()
                if result == testcase.TestCase.EX_OK:
                    if self.report_flag:
                        test_case.push_to_db()
                    result = test_case.is_successful()
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception("Cannot import the class for the test case.")

        return result

    def run_tier(self, tier):
        """Run every test case in a tier.

        :return: 0 when the tier has no test cases, None otherwise
            (preserved from the original interface).
        :raises BlockingTestFailed: if a blocking test case fails.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            return 0
        logger.info("\n\n")  # blank line
        self.print_separator("#")
        logger.info("Running tier '%s'", tier_name)
        self.print_separator("#")
        logger.debug("\n%s", tier)
        for test in tests:
            result = self.run_test(test, tier_name)
            if result != testcase.TestCase.EX_OK:
                logger.error("The test case '%s' failed.", test.get_name())
                self.overall_result = Result.EX_ERROR
                if test.is_blocking():
                    raise BlockingTestFailed(
                        "The test case {} failed and is blocking".format(
                            test.get_name()))

    def run_all(self, tiers):
        """Run all non-empty tiers whose ci_loop matches CONST.CI_LOOP."""
        summary = ""
        tiers_to_run = []

        for tier in tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                summary += ("\n    - %s:\n\t   %s"
                            % (tier.get_name(),
                               tier.get_test_names()))

        logger.info("Tests to be executed:%s", summary)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point: run the requested test, tier, or everything.

        Expected kwargs: 'test' (name or None), 'noclean' (bool),
        'report' (bool) — as produced by RunTestsParser.parse_args().

        :return: Result.EX_OK or Result.EX_ERROR.
        """
        _tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            CONST.__getattribute__("functest_testcases_yaml"))

        if kwargs['noclean']:
            self.clean_flag = False

        if kwargs['report']:
            self.report_flag = True

        try:
            if kwargs['test']:
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if _tiers.get_tier(kwargs['test']):
                    self.run_tier(_tiers.get_tier(kwargs['test']))
                elif _tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        _tiers.get_test(kwargs['test']),
                        _tiers.get_tier_name(kwargs['test']),
                        kwargs['test'])
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all(_tiers)
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s", _tiers)
                    return Result.EX_ERROR
            else:
                self.run_all(_tiers)
        except BlockingTestFailed:
            # A blocking test failed; overall_result was already set to
            # EX_ERROR in run_tier, so just fall through to the report.
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR

        # Deployment description table.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description: \n\n%s\n", msg)

        # Per-test-case summary table.
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier', 'duration', 'result'])
        for test_case in self.executed_test_cases:
            result = ('PASS' if test_case.is_successful() == test_case.EX_OK
                      else 'FAIL')
            msg.add_row([test_case.case_name, test_case.project_name,
                         _tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
        logger.info("FUNCTEST REPORT: \n\n%s\n", msg)

        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
267
268
if __name__ == '__main__':
    # Configure logging from the functest logging config file, then parse
    # the CLI and run; the Result enum's .value becomes the process exit code.
    logging.config.fileConfig(
        CONST.__getattribute__('dir_functest_logging_cfg'))
    cli_args = RunTestsParser().parse_args(sys.argv[1:])
    sys.exit(Runner().main(**cli_args).value)