-#!/usr/bin/python -u
-#
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import argparse
import enum
import logging
import logging.config
import os
+import pkg_resources
import re
import sys
+import textwrap
import prettytable
+import yaml
import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_clean as os_clean
-import functest.utils.openstack_snapshot as os_snapshot
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here
logger = logging.getLogger('functest.ci.run_tests')
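+# configuration files shipped within the functest package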
+CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
+ 'functest', 'ci/config_functest.yaml')
+CONFIG_PATCH_PATH = pkg_resources.resource_filename(
+ 'functest', 'ci/config_patch.yaml')
+CONFIG_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
+ 'functest', 'ci/config_aarch64_patch.yaml')
+# read the pod architecture from the environment (unset by default)
+pod_arch = os.getenv("POD_ARCH", None)
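+# architectures that require the aarch64 configuration patch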
+arch_filter = ['aarch64']
+
class Result(enum.Enum):
EX_OK = os.EX_OK
class Runner(object):
def __init__(self):
- self.executed_test_cases = []
+ self.executed_test_cases = {}
self.overall_result = Result.EX_OK
self.clean_flag = True
self.report_flag = False
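+        # build the tiers and their test cases from the packaged
+        # testcases.yaml according to the installer and scenario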
+ self._tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
+
+ @staticmethod
+ def update_config_file():
+ Runner.patch_file(CONFIG_PATCH_PATH)
+
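+        # apply the aarch64 overrides only when running on an ARM pod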
+ if pod_arch and pod_arch in arch_filter:
+ Runner.patch_file(CONFIG_AARCH64_PATCH_PATH)
+
+ if "TEST_DB_URL" in os.environ:
+ Runner.update_db_url()
+
+ @staticmethod
+ def patch_file(patch_file_path):
+ logger.debug('Updating file: %s', patch_file_path)
+ with open(patch_file_path) as f:
+ patch_file = yaml.safe_load(f)
+
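+        # merge every patch section whose key appears in the deploy
+        # scenario name into the current functest config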
+        updated = False
+        new_functest_yaml = ft_utils.get_functest_yaml()
+        for key in patch_file:
+            if key in CONST.__getattribute__('DEPLOY_SCENARIO'):
+                new_functest_yaml = dict(ft_utils.merge_dicts(
+                    new_functest_yaml, patch_file[key]))
+                updated = True
+
+ if updated:
+ os.remove(CONFIG_FUNCTEST_PATH)
+ with open(CONFIG_FUNCTEST_PATH, "w") as f:
+ f.write(yaml.dump(new_functest_yaml, default_style='"'))
@staticmethod
- def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
+ def update_db_url():
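+        # override the results database URL with the value exported in
+        # the environment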
+ with open(CONFIG_FUNCTEST_PATH) as f:
+ functest_yaml = yaml.safe_load(f)
+
+        functest_yaml["results"]["test_db_url"] = os.environ.get(
+            'TEST_DB_URL')
+        with open(CONFIG_FUNCTEST_PATH, "w") as f:
+            f.write(yaml.dump(functest_yaml, default_style='"'))
@staticmethod
def source_rc_file():
CONST.__setattr__('OS_TENANT_NAME', value)
elif key == 'OS_PASSWORD':
CONST.__setattr__('OS_PASSWORD', value)
-
- @staticmethod
- def generate_os_snapshot():
- os_snapshot.main()
-
- @staticmethod
- def cleanup():
- os_clean.main()
+ elif key == "OS_PROJECT_DOMAIN_NAME":
+ CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)
@staticmethod
def get_run_dict(testname):
logger.exception("Cannot get {}'s config options".format(testname))
return None
- def run_test(self, test, tier_name, testcases=None):
+ def run_test(self, test):
if not test.is_enabled():
raise TestNotEnabled(
"The test case {} is not enabled".format(test.get_name()))
- logger.info("\n") # blank line
- self.print_separator("=")
- logger.info("Running test case '%s'..." % test.get_name())
- self.print_separator("=")
- logger.debug("\n%s" % test)
- self.source_rc_file()
-
- if test.needs_clean() and self.clean_flag:
- self.generate_os_snapshot()
-
- flags = " -t %s" % test.get_name()
- if self.report_flag:
- flags += " -r"
-
+ logger.info("Running test case '%s'...", test.get_name())
result = testcase.TestCase.EX_RUN_ERROR
run_dict = self.get_run_dict(test.get_name())
if run_dict:
cls = getattr(module, run_dict['class'])
test_dict = ft_utils.get_dict_by_test(test.get_name())
test_case = cls(**test_dict)
- self.executed_test_cases.append(test_case)
+ self.executed_test_cases[test.get_name()] = test_case
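+            # snapshot the existing cloud resources so that clean() can
+            # remove anything created by the test case afterwards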
+ if self.clean_flag:
+ if test_case.create_snapshot() != test_case.EX_OK:
+ return testcase.TestCase.EX_RUN_ERROR
try:
kwargs = run_dict['args']
- result = test_case.run(**kwargs)
+ test_case.run(**kwargs)
except KeyError:
- result = test_case.run()
- if result == testcase.TestCase.EX_OK:
- if self.report_flag:
- test_case.push_to_db()
+ test_case.run()
+ if self.report_flag:
+ test_case.push_to_db()
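+            # only functest-owned test cases determine the return value;
+            # other projects' results are reported but never fail the run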
+ if test.get_project() == "functest":
result = test_case.is_successful()
+ else:
+ result = testcase.TestCase.EX_OK
logger.info("Test result:\n\n%s\n", test_case)
+ if self.clean_flag:
+ test_case.clean()
except ImportError:
logger.exception("Cannot import module {}".format(
run_dict['module']))
run_dict['class']))
else:
raise Exception("Cannot import the class for the test case.")
-
- if test.needs_clean() and self.clean_flag:
- self.cleanup()
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed. " % test.get_name())
- self.overall_result = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed(
- "The test case {} failed and is blocking".format(
- test.get_name()))
+ return result
def run_tier(self, tier):
tier_name = tier.get_name()
if tests is None or len(tests) == 0:
logger.info("There are no supported test cases in this tier "
"for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- self.print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- self.print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- self.run_test(test, tier_name)
-
- def run_all(self, tiers):
- summary = ""
- tiers_to_run = []
+ self.overall_result = Result.EX_ERROR
+ else:
+            logger.info("Running tier '%s'", tier_name)
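+            # a failing blocking test case raises BlockingTestFailed and
+            # aborts the remaining tiers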
+ for test in tests:
+ self.run_test(test)
+ test_case = self.executed_test_cases[test.get_name()]
+ if test_case.is_successful() != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed.", test.get_name())
+ if test.get_project() == "functest":
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+ return self.overall_result
- for tier in tiers.get_tiers():
+ def run_all(self):
+ tiers_to_run = []
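+        # list the tiers selected for the current CI loop before
+        # running them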
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['tiers', 'order', 'CI Loop', 'description',
+ 'testcases'])
+ for tier in self._tiers.get_tiers():
if (len(tier.get_tests()) != 0 and
re.search(CONST.__getattribute__('CI_LOOP'),
tier.get_ci_loop()) is not None):
tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
+ msg.add_row([tier.get_name(), tier.get_order(),
+ tier.get_ci_loop(),
+ textwrap.fill(tier.description, width=40),
+                         textwrap.fill(' '.join(
+                             str(x.get_name()) for x in tier.get_tests()),
+                             width=40)])
+ logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
for tier in tiers_to_run:
self.run_tier(tier)
def main(self, **kwargs):
- _tiers = tb.TierBuilder(
- CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- CONST.__getattribute__("functest_testcases_yaml"))
-
- if kwargs['noclean']:
- self.clean_flag = False
-
- if kwargs['report']:
- self.report_flag = True
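+        # patch the packaged configuration for the current scenario and
+        # architecture before reading any test option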
+ Runner.update_config_file()
+ if 'noclean' in kwargs:
+ self.clean_flag = not kwargs['noclean']
+ if 'report' in kwargs:
+ self.report_flag = kwargs['report']
try:
- if kwargs['test']:
+ if 'test' in kwargs:
self.source_rc_file()
- logger.error(kwargs['test'])
- if _tiers.get_tier(kwargs['test']):
- self.run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
- self.run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
+ logger.debug("Test args: %s", kwargs['test'])
+ if self._tiers.get_tier(kwargs['test']):
+ self.run_tier(self._tiers.get_tier(kwargs['test']))
+ elif self._tiers.get_test(kwargs['test']):
+ result = self.run_test(
+ self._tiers.get_test(kwargs['test']))
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed.",
+ kwargs['test'])
+ self.overall_result = Result.EX_ERROR
elif kwargs['test'] == "all":
- self.run_all(_tiers)
+ self.run_all()
else:
logger.error("Unknown test case or tier '%s', "
"or not supported by "
% (kwargs['test'],
CONST.__getattribute__('DEPLOY_SCENARIO')))
logger.debug("Available tiers are:\n\n%s",
- _tiers)
+ self._tiers)
return Result.EX_ERROR
else:
- self.run_all(_tiers)
+ self.run_all()
except BlockingTestFailed:
pass
except Exception:
logger.exception("Failures when running testcase(s)")
self.overall_result = Result.EX_ERROR
+        if not self._tiers.get_test(kwargs.get('test')):
+            self.summary(self._tiers.get_tier(kwargs.get('test')))
+        logger.info("Execution exit value: %s", self.overall_result)
+ return self.overall_result
+ def summary(self, tier=None):
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['env var', 'value'])
for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
'CI_LOOP']:
msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
+ logger.info("Deployment description:\n\n%s\n", msg)
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier', 'duration', 'result'])
- for test_case in self.executed_test_cases:
- result = 'PASS' if(test_case.is_successful(
- ) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
+ field_names=['test case', 'project', 'tier',
+ 'duration', 'result'])
+ tiers = [tier] if tier else self._tiers.get_tiers()
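+        # one row per test case; cases which never ran are reported as
+        # SKIP with a zero duration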
+ for tier in tiers:
+ for test in tier.get_tests():
+ try:
+ test_case = self.executed_test_cases[test.get_name()]
+ except KeyError:
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ else:
+                    result = ('PASS'
+                              if test_case.is_successful() == test_case.EX_OK
+                              else 'FAIL')
+ msg.add_row(
+ [test_case.case_name, test_case.project_name,
+ self._tiers.get_tier_name(test_case.case_name),
test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % self.overall_result)
- return self.overall_result
+ for test in tier.get_skipped_test():
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
-if __name__ == '__main__':
- logging.config.fileConfig(
- CONST.__getattribute__('dir_functest_logging_cfg'))
+def main():
+ logging.config.fileConfig(pkg_resources.resource_filename(
+ 'functest', 'ci/logging.ini'))
+ logging.captureWarnings(True)
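+    # forward warnings.warn() messages to the logging framework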
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
runner = Runner()
- sys.exit(runner.main(**args).value)
+ return runner.main(**args).value