X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=functest%2Fci%2Frun_tests.py;h=d4acd9c5616aa0c2ad81f9dfe9a15a38f5a303d5;hb=516d1bd7811f5751d796748ddd52f798bbd5a40f;hp=95353c87ae2ad05645b5b1d11ca4ab81cffaa13b;hpb=7e6191e9d4f3734efb1aacf2ded8c20b16aeb4af;p=functest.git diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py old mode 100755 new mode 100644 index 95353c87a..d4acd9c56 --- a/functest/ci/run_tests.py +++ b/functest/ci/run_tests.py @@ -1,12 +1,11 @@ -#!/usr/bin/python -u -# -# Author: Jose Lausuch (jose.lausuch@ericsson.com) +#!/usr/bin/env python + +# Copyright (c) 2016 Ericsson AB and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 -# import argparse import enum @@ -14,16 +13,16 @@ import importlib import logging import logging.config import os +import pkg_resources import re import sys +import textwrap import prettytable import functest.ci.tier_builder as tb import functest.core.testcase as testcase import functest.utils.functest_utils as ft_utils -import functest.utils.openstack_clean as os_clean -import functest.utils.openstack_snapshot as os_snapshot import functest.utils.openstack_utils as os_utils from functest.utils.constants import CONST @@ -67,17 +66,14 @@ class RunTestsParser(object): class Runner(object): def __init__(self): - self.executed_test_cases = [] + self.executed_test_cases = {} self.overall_result = Result.EX_OK self.clean_flag = True self.report_flag = False - - @staticmethod - def print_separator(str, count=45): - line = "" - for i in range(0, count - 1): - line += str - logger.info("%s" % line) + self._tiers = tb.TierBuilder( + CONST.__getattribute__('INSTALLER_TYPE'), + CONST.__getattribute__('DEPLOY_SCENARIO'), + pkg_resources.resource_filename('functest', 'ci/testcases.yaml')) @staticmethod def source_rc_file(): @@ -96,14 +92,8 @@ class Runner(object): CONST.__setattr__('OS_TENANT_NAME', value) elif key == 'OS_PASSWORD': CONST.__setattr__('OS_PASSWORD', value) - - @staticmethod - def generate_os_snapshot(): - os_snapshot.main() - - @staticmethod - def cleanup(): - os_clean.main() + elif key == "OS_PROJECT_DOMAIN_NAME": + CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value) @staticmethod def get_run_dict(testname): @@ -118,24 +108,11 @@ class Runner(object): logger.exception("Cannot get {}'s config options".format(testname)) return None - def run_test(self, test, tier_name, testcases=None): + def run_test(self, test): if not test.is_enabled(): raise TestNotEnabled( "The test case {} is not enabled".format(test.get_name())) - logger.info("\n") # blank line - self.print_separator("=") - logger.info("Running test case '%s'..." 
% test.get_name()) - self.print_separator("=") - logger.debug("\n%s" % test) - self.source_rc_file() - - if test.needs_clean() and self.clean_flag: - self.generate_os_snapshot() - - flags = " -t %s" % test.get_name() - if self.report_flag: - flags += " -r" - + logger.info("Running test case '%s'...", test.get_name()) result = testcase.TestCase.EX_RUN_ERROR run_dict = self.get_run_dict(test.get_name()) if run_dict: @@ -144,17 +121,24 @@ class Runner(object): cls = getattr(module, run_dict['class']) test_dict = ft_utils.get_dict_by_test(test.get_name()) test_case = cls(**test_dict) - self.executed_test_cases.append(test_case) + self.executed_test_cases[test.get_name()] = test_case + if self.clean_flag: + if test_case.create_snapshot() != test_case.EX_OK: + return testcase.TestCase.EX_RUN_ERROR try: kwargs = run_dict['args'] - result = test_case.run(**kwargs) + test_case.run(**kwargs) except KeyError: - result = test_case.run() - if result == testcase.TestCase.EX_OK: - if self.report_flag: - test_case.push_to_db() + test_case.run() + if self.report_flag: + test_case.push_to_db() + if test.get_project() == "functest": result = test_case.is_successful() + else: + result = testcase.TestCase.EX_OK logger.info("Test result:\n\n%s\n", test_case) + if self.clean_flag: + test_case.clean() except ImportError: logger.exception("Cannot import module {}".format( run_dict['module'])) @@ -163,16 +147,7 @@ class Runner(object): run_dict['class'])) else: raise Exception("Cannot import the class for the test case.") - - if test.needs_clean() and self.clean_flag: - self.cleanup() - if result != testcase.TestCase.EX_OK: - logger.error("The test case '%s' failed. " % test.get_name()) - self.overall_result = Result.EX_ERROR - if test.is_blocking(): - raise BlockingTestFailed( - "The test case {} failed and is blocking".format( - test.get_name())) + return result def run_tier(self, tier): tier_name = tier.get_name() @@ -180,56 +155,62 @@ class Runner(object): if tests is None or len(tests) == 0: logger.info("There are no supported test cases in this tier " "for the given scenario") - return 0 - logger.info("\n\n") # blank line - self.print_separator("#") - logger.info("Running tier '%s'" % tier_name) - self.print_separator("#") - logger.debug("\n%s" % tier) - for test in tests: - self.run_test(test, tier_name) + self.overall_result = Result.EX_ERROR + else: + logger.info("Running tier '%s'" % tier_name) + for test in tests: + self.run_test(test) + test_case = self.executed_test_cases[test.get_name()] + if test_case.is_successful() != testcase.TestCase.EX_OK: + logger.error("The test case '%s' failed.", test.get_name()) + if test.get_project() == "functest": + self.overall_result = Result.EX_ERROR + if test.is_blocking(): + raise BlockingTestFailed( + "The test case {} failed and is blocking".format( + test.get_name())) + return self.overall_result - def run_all(self, tiers): - summary = "" + def run_all(self): tiers_to_run = [] - - for tier in tiers.get_tiers(): + msg = prettytable.PrettyTable( + header_style='upper', padding_width=5, + field_names=['tiers', 'order', 'CI Loop', 'description', + 'testcases']) + for tier in self._tiers.get_tiers(): if (len(tier.get_tests()) != 0 and re.search(CONST.__getattribute__('CI_LOOP'), tier.get_ci_loop()) is not None): tiers_to_run.append(tier) - summary += ("\n - %s:\n\t %s" - % (tier.get_name(), - tier.get_test_names())) - - logger.info("Tests to be executed:%s" % summary) + msg.add_row([tier.get_name(), tier.get_order(), + tier.get_ci_loop(), + 
textwrap.fill(tier.description, width=40), + textwrap.fill(' '.join([str(x.get_name( + )) for x in tier.get_tests()]), width=40)]) + logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg) for tier in tiers_to_run: self.run_tier(tier) def main(self, **kwargs): - _tiers = tb.TierBuilder( - CONST.__getattribute__('INSTALLER_TYPE'), - CONST.__getattribute__('DEPLOY_SCENARIO'), - CONST.__getattribute__("functest_testcases_yaml")) - - if kwargs['noclean']: - self.clean_flag = False - - if kwargs['report']: - self.report_flag = True - + if 'noclean' in kwargs: + self.clean_flag = not kwargs['noclean'] + if 'report' in kwargs: + self.report_flag = kwargs['report'] try: - if kwargs['test']: + if 'test' in kwargs: self.source_rc_file() - logger.error(kwargs['test']) - if _tiers.get_tier(kwargs['test']): - self.run_tier(_tiers.get_tier(kwargs['test'])) - elif _tiers.get_test(kwargs['test']): - self.run_test(_tiers.get_test(kwargs['test']), - _tiers.get_tier_name(kwargs['test']), - kwargs['test']) + logger.debug("Test args: %s", kwargs['test']) + if self._tiers.get_tier(kwargs['test']): + self.run_tier(self._tiers.get_tier(kwargs['test'])) + elif self._tiers.get_test(kwargs['test']): + result = self.run_test( + self._tiers.get_test(kwargs['test'])) + if result != testcase.TestCase.EX_OK: + logger.error("The test case '%s' failed.", + kwargs['test']) + self.overall_result = Result.EX_ERROR elif kwargs['test'] == "all": - self.run_all(_tiers) + self.run_all() else: logger.error("Unknown test case or tier '%s', " "or not supported by " @@ -237,43 +218,58 @@ class Runner(object): % (kwargs['test'], CONST.__getattribute__('DEPLOY_SCENARIO'))) logger.debug("Available tiers are:\n\n%s", - _tiers) + self._tiers) return Result.EX_ERROR else: - self.run_all(_tiers) + self.run_all() except BlockingTestFailed: pass except Exception: logger.exception("Failures when running testcase(s)") self.overall_result = Result.EX_ERROR + if not self._tiers.get_test(kwargs['test']): + self.summary(self._tiers.get_tier(kwargs['test'])) + logger.info("Execution exit value: %s" % self.overall_result) + return self.overall_result + def summary(self, tier=None): msg = prettytable.PrettyTable( header_style='upper', padding_width=5, field_names=['env var', 'value']) for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG', 'CI_LOOP']: msg.add_row([env_var, CONST.__getattribute__(env_var)]) - logger.info("Deployment description: \n\n%s\n", msg) - + logger.info("Deployment description:\n\n%s\n", msg) msg = prettytable.PrettyTable( header_style='upper', padding_width=5, - field_names=['test case', 'project', 'tier', 'duration', 'result']) - for test_case in self.executed_test_cases: - result = 'PASS' if(test_case.is_successful( - ) == test_case.EX_OK) else 'FAIL' - msg.add_row([test_case.case_name, test_case.project_name, - _tiers.get_tier_name(test_case.case_name), + field_names=['test case', 'project', 'tier', + 'duration', 'result']) + tiers = [tier] if tier else self._tiers.get_tiers() + for tier in tiers: + for test in tier.get_tests(): + try: + test_case = self.executed_test_cases[test.get_name()] + except KeyError: + msg.add_row([test.get_name(), test.get_project(), + tier.get_name(), "00:00", "SKIP"]) + else: + result = 'PASS' if(test_case.is_successful( + ) == test_case.EX_OK) else 'FAIL' + msg.add_row( + [test_case.case_name, test_case.project_name, + self._tiers.get_tier_name(test_case.case_name), test_case.get_duration(), result]) - logger.info("FUNCTEST REPORT: \n\n%s\n", msg) - - logger.info("Execution exit value: %s" % 
self.overall_result) - return self.overall_result + for test in tier.get_skipped_test(): + msg.add_row([test.get_name(), test.get_project(), + tier.get_name(), "00:00", "SKIP"]) + logger.info("FUNCTEST REPORT:\n\n%s\n", msg) -if __name__ == '__main__': - logging.config.fileConfig( - CONST.__getattribute__('dir_functest_logging_cfg')) +def main(): + logging.config.fileConfig(pkg_resources.resource_filename( + 'functest', 'ci/logging.ini')) + logging.captureWarnings(True) parser = RunTestsParser() args = parser.parse_args(sys.argv[1:]) runner = Runner() - sys.exit(runner.main(**args).value) + return runner.main(**args).value
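
The tail of this change turns run_tests.py from a directly executed script (the removed "if __name__ == '__main__'" block that called sys.exit()) into an importable module whose main() configures logging from the packaged ci/logging.ini and returns the numeric Result value. Below is a minimal sketch of how a thin wrapper could drive the refactored module; the wrapper itself, the 'healthcheck' tier name and the keyword arguments shown are illustrative assumptions and are not part of this diff.

    #!/usr/bin/env python
    # Hypothetical wrapper around the refactored functest.ci.run_tests.
    # Assumes functest is installed so pkg_resources can resolve the packaged
    # ci/testcases.yaml and ci/logging.ini, and that the usual functest
    # environment (INSTALLER_TYPE, DEPLOY_SCENARIO, OpenStack credentials)
    # is already in place.
    import sys

    from functest.ci import run_tests

    if __name__ == '__main__':
        # run_tests.main() parses sys.argv[1:] itself (via RunTestsParser)
        # and returns Result(...).value, so the wrapper only exits with it.
        sys.exit(run_tests.main())

    # Programmatic use without the CLI (tier name assumed for illustration):
    #   runner = run_tests.Runner()
    #   result = runner.main(test='healthcheck', noclean=False, report=False)
    #   print(result.value)  # Runner.main() returns a Result enum member

Returning the exit value instead of calling sys.exit() inside the module keeps Runner importable and testable, and leaves process exit to whatever entry point replaces the removed executable bit (old mode 100755, new mode 100644).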