#!/usr/bin/env python

# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

import argparse
import enum
import importlib
import logging
import logging.config
import os
import pkg_resources
import re
import sys
import textwrap

import prettytable
import yaml

import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST

# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')

CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_functest.yaml')
CONFIG_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_patch.yaml')
CONFIG_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
    'functest', 'ci/config_aarch64_patch.yaml')
# read the POD architecture from the environment, if set
pod_arch = os.getenv("POD_ARCH", None)
arch_filter = ['aarch64']


class Result(enum.Enum):
    EX_OK = os.EX_OK
    EX_ERROR = -1
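    # Note: os.EX_OK is 0 on POSIX; -1, when used as a process exit
    # status, is reported as 255.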


class BlockingTestFailed(Exception):
    pass


class TestNotEnabled(Exception):
    pass


class RunTestsParser(object):

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the tests "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=[]):
        return vars(self.parser.parse_args(argv))
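
    # Hypothetical CLI usage once this module is wired to a console-script
    # entry point (the "run_tests" command name is an assumption):
    #   run_tests -t healthcheck -r   # run one tier, push results to the DB
    #   run_tests -t all -n           # run everything, keep OpenStack resources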


class Runner(object):

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self._tiers = tb.TierBuilder(
            CONST.__getattribute__('INSTALLER_TYPE'),
            CONST.__getattribute__('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))

    @staticmethod
    def update_config_file():
        Runner.patch_file(CONFIG_PATCH_PATH)

        if pod_arch and pod_arch in arch_filter:
            Runner.patch_file(CONFIG_AARCH64_PATCH_PATH)

    @staticmethod
    def patch_file(patch_file_path):
        logger.debug('Updating file: %s', patch_file_path)
        with open(patch_file_path) as f:
            patch_file = yaml.safe_load(f)

        updated = False
        for key in patch_file:
            if key in CONST.__getattribute__('DEPLOY_SCENARIO'):
                new_functest_yaml = dict(ft_utils.merge_dicts(
                    ft_utils.get_functest_yaml(), patch_file[key]))
                updated = True

        if updated:
            os.remove(CONFIG_FUNCTEST_PATH)
            with open(CONFIG_FUNCTEST_PATH, "w") as f:
                f.write(yaml.dump(new_functest_yaml, default_style='"'))
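
        # Sketch of a patch file: a mapping from scenario substrings to
        # config_functest.yaml overrides (keys and values are illustrative):
        #   lxd:
        #       general:
        #           openstack:
        #               image_name: some-lxd-image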

    @staticmethod
    def source_rc_file():
        rc_file = CONST.__getattribute__('openstack_creds')
        if not os.path.isfile(rc_file):
            raise Exception("RC file %s does not exist..." % rc_file)
        logger.debug("Sourcing the OpenStack RC file...")
        os_utils.source_credentials(rc_file)
        for key, value in os.environ.items():
            if re.search("OS_", key):
                if key == 'OS_AUTH_URL':
                    CONST.__setattr__('OS_AUTH_URL', value)
                elif key == 'OS_USERNAME':
                    CONST.__setattr__('OS_USERNAME', value)
                elif key == 'OS_TENANT_NAME':
                    CONST.__setattr__('OS_TENANT_NAME', value)
                elif key == 'OS_PASSWORD':
                    CONST.__setattr__('OS_PASSWORD', value)
                elif key == "OS_PROJECT_DOMAIN_NAME":
                    CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)
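
        # The RC file is a standard OpenStack openrc script exporting OS_*
        # variables, for instance:
        #   export OS_AUTH_URL=http://<keystone>:5000/v3
        #   export OS_USERNAME=admin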

    @staticmethod
    def get_run_dict(testname):
        try:
            run_dict = ft_utils.get_dict_by_test(testname)
            if not run_dict:
                logger.error("Cannot get {}'s config options".format(testname))
            elif 'run' in run_dict:
                return run_dict['run']
            return None
        except Exception:
            logger.exception("Cannot get {}'s config options".format(testname))
            return None
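
    # Shape of the 'run' block consumed by get_run_dict()/run_test(), as
    # defined per test case in ci/testcases.yaml (the module and class
    # names below are illustrative):
    #   run:
    #       module: 'functest.opnfv_tests.openstack.vping.vping_ssh'
    #       class: 'VPingSSH'
    #       args: {...}
    # 'args', when present, is passed to run() as keyword arguments.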

    def run_test(self, test):
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        logger.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = ft_utils.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                if self.clean_flag:
                    if test_case.create_snapshot() != test_case.EX_OK:
                        return testcase.TestCase.EX_RUN_ERROR
                try:
                    kwargs = run_dict['args']
                except KeyError:
                    kwargs = {}
                test_case.run(**kwargs)
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "functest":
                    result = test_case.is_successful()
                else:
                    result = testcase.TestCase.EX_OK
                logger.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                logger.exception("Cannot import module {}".format(
                    run_dict['module']))
            except AttributeError:
                logger.exception("Cannot get class {}".format(
                    run_dict['class']))
        else:
            raise Exception(
                "Cannot get the run configuration of the test case.")
        return result
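
    # run_test() lifecycle: resolve the module/class named in the 'run'
    # block, optionally snapshot OpenStack resources (clean_flag), call
    # run() with any 'args', optionally push results (report_flag), then
    # clean() removes test resources when cleaning is enabled.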

    def run_tier(self, tier):
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if tests is None or len(tests) == 0:
            logger.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            logger.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    logger.error("The test case '%s' failed.", test.get_name())
                    if test.get_project() == "functest":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self._tiers.get_tiers():
            if (len(tier.get_tests()) != 0 and
                    re.search(CONST.__getattribute__('CI_LOOP'),
                              tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join(
                                 [str(x.get_name()) for x in
                                  tier.get_tests()]), width=40)])
        logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        Runner.update_config_file()

        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        try:
            if kwargs.get('test'):
                self.source_rc_file()
                logger.debug("Test args: %s", kwargs['test'])
                if self._tiers.get_tier(kwargs['test']):
                    self.run_tier(self._tiers.get_tier(kwargs['test']))
                elif self._tiers.get_test(kwargs['test']):
                    result = self.run_test(
                        self._tiers.get_test(kwargs['test']))
                    if result != testcase.TestCase.EX_OK:
                        logger.error("The test case '%s' failed.",
                                     kwargs['test'])
                        self.overall_result = Result.EX_ERROR
                elif kwargs['test'] == "all":
                    self.run_all()
                else:
                    logger.error("Unknown test case or tier '%s', "
                                 "or not supported by "
                                 "the given scenario '%s'.",
                                 kwargs['test'],
                                 CONST.__getattribute__('DEPLOY_SCENARIO'))
                    logger.debug("Available tiers are:\n\n%s",
                                 self._tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            pass
        except Exception:
            logger.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self._tiers.get_test(kwargs.get('test')):
            self.summary(self._tiers.get_tier(kwargs.get('test')))
        logger.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['env var', 'value'])
        for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                        'CI_LOOP']:
            msg.add_row([env_var, CONST.__getattribute__(env_var)])
        logger.info("Deployment description:\n\n%s\n", msg)
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self._tiers.get_tiers()
        for tier in tiers:
            for test in tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    msg.add_row([test.get_name(), test.get_project(),
                                 tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if (test_case.is_successful() ==
                                        test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self._tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             tier.get_name(), "00:00", "SKIP"])
        logger.info("FUNCTEST REPORT:\n\n%s\n", msg)


def main():
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
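

# Direct-execution guard: a minimal sketch, assuming the packaged
# console-script entry point simply calls main() and exits with its value.
if __name__ == "__main__":
    sys.exit(main())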