#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

import argparse
import enum
import importlib
import logging
import logging.config
import os
import pkg_resources
import re
import sys
import textwrap

import prettytable
import yaml

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')

CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
CONFIG_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_patch.yaml')
CONFIG_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_aarch64_patch.yaml')
# get the pod architecture from the environment, if set
pod_arch = os.getenv("POD_ARCH", None)
arch_filter = ['aarch64']


class Result(enum.Enum):
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    pass


class TestNotEnabled(Exception):
    pass


class RunTestsParser(object):
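    """Command-line parser for run_tests (-t, -n and -r options)."""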

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. All the tests are run "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
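        """Parse the given argument list (e.g. sys.argv[1:]) into a dict."""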
        return vars(self.parser.parse_args(argv))


class Runner(object):
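    """Runner in charge of executing the test cases and tiers."""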

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def update_config_file():
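        """Apply the config patches and update the db url if needed."""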
        Runner.patch_file(CONFIG_PATCH_PATH)

        if pod_arch and pod_arch in arch_filter:
            Runner.patch_file(CONFIG_AARCH64_PATCH_PATH)

        if "TEST_DB_URL" in os.environ:
            Runner.update_db_url()

    @staticmethod
    def patch_file(patch_file_path):
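        """Update config_functest.yaml with patches matching the scenario."""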
        logger.debug('Updating file: %s', patch_file_path)
        with open(patch_file_path) as f:
            patch_file = yaml.safe_load(f)

        updated = False
        for key in patch_file:
            if key in CONST.__getattribute__('DEPLOY_SCENARIO'):
                new_functest_yaml = dict(ft_utils.merge_dicts(
                    ft_utils.get_functest_yaml(), patch_file[key]))
                updated = True

        if updated:
            os.remove(CONFIG_FUNCTEST_PATH)
            with open(CONFIG_FUNCTEST_PATH, "w") as f:
                f.write(yaml.dump(new_functest_yaml, default_style='"'))

    @staticmethod
    def update_db_url():
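        """Write TEST_DB_URL into config_functest.yaml."""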
        with open(CONFIG_FUNCTEST_PATH) as f:
            functest_yaml = yaml.safe_load(f)

        with open(CONFIG_FUNCTEST_PATH, "w") as f:
            functest_yaml["results"]["test_db_url"] = os.environ.get(
                'TEST_DB_URL')
            f.write(yaml.dump(functest_yaml, default_style='"'))

    @staticmethod
    def source_rc_file():
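        """Source the OpenStack RC file and store the OS_* vars in CONST."""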
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        # iteritems() is Python 2 only; items() works with both Python 2
        # and Python 3
        for key, value in os.environ.items():
            if key in ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME',
                       'OS_PASSWORD', 'OS_PROJECT_DOMAIN_NAME'):
                CONST.__setattr__(key, value)

    @staticmethod
    def get_run_dict(testname):
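        """Return the 'run' block of the given test case, or None."""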
        try:
            # do not shadow the dict builtin
            test_dict = ft_utils.get_dict_by_test(testname)
            if not test_dict:
                logger.error("Cannot get %s's config options", testname)
            elif 'run' in test_dict:
                return test_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
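        """Run a single test case and return its result."""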
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                # 'args' is optional; don't let a KeyError raised by run()
                # itself trigger a second argument-less run
                kwargs = run_dict.get('args', {})
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception("Cannot import module %s",
                                 run_dict['module'])
            except AttributeError:
                logger.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
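        """Run all the test cases of the given tier."""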
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            logger.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    logger.error("The test case '%s' failed.",
                                 test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
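        """Run all the tiers matching the current CI loop."""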
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join(
                                 [str(x.get_name())
                                  for x in tier.get_tests()]), width=40)])
        logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
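        """Run the requested test case, tier or all tiers, then summarize."""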
        Runner.update_config_file()

        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if kwargs.get('test'):
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    logger.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s", self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self._tiers.get_test(kwargs.get('test')):
            self.summary(self._tiers.get_tier(kwargs.get('test')))
        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
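        """Log a summary of the executed and skipped test cases."""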
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for tier in tiers:
            for test in tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 tier.get_name(), "00:00", "SKIP"])
                else:
                    success = test_case.is_successful() == test_case.EX_OK
                    result = 'PASS' if success else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             tier.get_name(), "00:00", "SKIP"])
        logger.info("FUNCTEST REPORT:\n\n%s\n", msg)


def main():
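    """Configure logging and run Functest with the CLI arguments."""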
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
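

# a minimal guard, assuming the module may also be executed directly
# (it is normally installed as a console script calling main())
if __name__ == '__main__':
    sys.exit(main())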