Enable promise on all fuel and joid scenarios
[functest.git] / functest / ci / run_tests.py
#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

import argparse
import enum
import importlib
import logging
import logging.config
import os
import pkg_resources
import re
import sys
import textwrap

import prettytable

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here: it would be '__main__' when run as a script
logger = logging.getLogger('functest.ci.run_tests')


class Result(enum.Enum):
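    """Overall exit status: EX_OK on success, EX_ERROR on failure."""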
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
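    """Raised when a blocking test case fails."""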
    pass


class TestNotEnabled(Exception):
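    """Raised when trying to run a disabled test case."""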
    pass


class RunTestsParser(object):
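    """Parser of the run_tests command-line arguments."""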

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. All the tests are run "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
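        """Parse the given argument list and return it as a dict."""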
        return vars(self.parser.parse_args(argv or []))


class Runner(object):
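    """Runner of the tiers and test cases selected for the scenario."""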

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def source_rc_file():
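        """Source the RC file and mirror the OS_* credentials in CONST."""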
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        for key, value in os.environ.items():
            if key in ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME',
                       'OS_PASSWORD', 'OS_PROJECT_DOMAIN_NAME'):
                CONST.__setattr__(key, value)

    @staticmethod
    def get_run_dict(testname):
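        """Return the 'run' block of a test case config, or None on error."""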
        try:
            run_dict = ft_utils.get_dict_by_test(testname)
            if not run_dict:
                logger.error("Cannot get {}'s config options".format(testname))
            elif 'run' in run_dict:
                return run_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get {}'s config options".format(testname))
            return None

    def run_test(self, test):
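        """Run a test case and return its execution status."""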
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                try:
                    kwargs = run_dict['args']
                    test_case.run(**kwargs)
                except KeyError:
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception(
                "Cannot get the run configuration of the test case.")
        return result

    def run_tier(self, tier):
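        """Run all the test cases of a tier and return the overall result."""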
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            logger.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    logger.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
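        """Run all the tiers matching the current CI loop."""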
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            if (tier.get_tests() and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join(
                                 [str(x.get_name()) for x in
                                  tier.get_tests()]), width=40)])
        logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
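        """Run the test case, tier or full suite given in kwargs."""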
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if kwargs.get('test'):
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self._tiers.get_test(kwargs.get('test')):
            self.summary(self._tiers.get_tier(kwargs.get('test')))
        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
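        """Log the deployment description and the test case results."""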
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for tier in tiers:
            for test in tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 tier.get_name(), "00:00", "SKIP"])
                else:
                    result = ('PASS' if test_case.is_successful()
                              == test_case.EX_OK else 'FAIL')
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             tier.get_name(), "00:00", "SKIP"])
        logger.info("FUNCTEST REPORT:\n\n%s\n", msg)


def main():
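    """Set up logging and run the test cases given on the command line."""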
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
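

# A minimal sketch of standalone invocation, assuming direct execution is
# wanted; upstream may instead expose main() as a console_scripts entry point.
if __name__ == '__main__':
    sys.exit(main())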