X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=ci%2Frun_tests.py;h=758a87c2d311ccbe582fa76a4b5219485dc59f84;hb=8f3ad08c40bd8977651794f6720eda5df61b0c26;hp=5b930982847146a13fbd8590d977105486947a6b;hpb=6790e17eb7c1a3eaaccfe97ac90932e3c15eea6d;p=functest.git

diff --git a/ci/run_tests.py b/ci/run_tests.py
old mode 100644
new mode 100755
index 5b9309828..758a87c2d
--- a/ci/run_tests.py
+++ b/ci/run_tests.py
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env python
 #
 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
 #
@@ -9,17 +9,19 @@
 #
 
 import argparse
+import datetime
 import os
-import subprocess
+import re
 import sys
-
+import functest.ci.generate_report as generate_report
 import functest.ci.tier_builder as tb
-import functest.utils.clean_openstack as clean_os
 import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_clean as os_clean
+import functest.utils.openstack_snapshot as os_snapshot
 import functest.utils.openstack_utils as os_utils
 
-""" arguments """
 parser = argparse.ArgumentParser()
 parser.add_argument("-t", "--test", dest="test", action='store',
                     help="Test case or tier (group of tests) to be executed. "
@@ -42,6 +44,11 @@ FUNCTEST_REPO = ("%s/functest/" % REPOS_DIR)
 EXEC_SCRIPT = ("%sci/exec_test.sh" % FUNCTEST_REPO)
 CLEAN_FLAG = True
 REPORT_FLAG = False
+EXECUTED_TEST_CASES = []
+
+# This will be the return code of this script. If any of the tests fails,
+# this variable will change to -1
+OVERALL_RESULT = 0
 
 
 def print_separator(str, count=45):
@@ -56,66 +63,114 @@ def source_rc_file():
     if not os.path.isfile(rc_file):
         logger.error("RC file %s does not exist..." % rc_file)
         sys.exit(1)
-    logger.info("Sourcing the OpenStack RC file...")
+    logger.debug("Sourcing the OpenStack RC file...")
     os_utils.source_credentials(rc_file)
 
 
+def generate_os_snapshot():
+    os_snapshot.main()
+
+
 def cleanup():
-    clean_os.main()
+    os_clean.main()
+
 
+def update_test_info(test_name, result, duration):
+    for test in EXECUTED_TEST_CASES:
+        if test['test_name'] == test_name:
+            test.update({"result": result,
+                         "duration": duration})
 
-def run_test(test):
+
+def run_test(test, tier_name):
+    global OVERALL_RESULT, EXECUTED_TEST_CASES
+    result_str = "PASS"
+    start = datetime.datetime.now()
     test_name = test.get_name()
+    logger.info("\n")  # blank line
     print_separator("=")
     logger.info("Running test case '%s'..." % test_name)
     print_separator("=")
     logger.debug("\n%s" % test)
+
+    if CLEAN_FLAG:
+        generate_os_snapshot()
+
     flags = (" -t %s" % (test_name))
     if REPORT_FLAG:
         flags += " -r"
     cmd = ("%s%s" % (EXEC_SCRIPT, flags))
     logger.debug("Executing command '%s'" % cmd)
+    result = ft_utils.execute_command(cmd, logger, exit_on_error=False)
 
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
+    if CLEAN_FLAG:
+        cleanup()
+    end = datetime.datetime.now()
+    duration = (end - start).seconds
+    duration_str = ("%02d:%02d" % divmod(duration, 60))
+    logger.info("Test execution time: %s" % duration_str)
 
-    while p.poll() is None:
-        line = p.stdout.readline().rstrip()
-        logger.debug(line)
+    if result != 0:
+        logger.error("The test case '%s' failed. " % test_name)
+        OVERALL_RESULT = -1
+        result_str = "FAIL"
 
-    if p.returncode != 0:
-        logger.error("The test case '%s' failed. Cleaning and exiting."
-                     % test_name)
-        if CLEAN_FLAG:
-            cleanup()
-        sys.exit(1)
+        if test.is_blocking():
+            if not args.test or args.test == "all":
+                logger.info("This test case is blocking. Aborting overall "
+                            "execution.")
+                # if it is a single test we don't print the whole results table
+                update_test_info(test_name, result_str, duration_str)
+                generate_report.main(EXECUTED_TEST_CASES)
+            logger.info("Execution exit value: %s" % OVERALL_RESULT)
+            sys.exit(OVERALL_RESULT)
 
-    if CLEAN_FLAG:
-        cleanup()
+    update_test_info(test_name, result_str, duration_str)
 
 
 def run_tier(tier):
+    tier_name = tier.get_name()
+    tests = tier.get_tests()
+    if tests is None or len(tests) == 0:
+        logger.info("There are no supported test cases in this tier "
+                    "for the given scenario")
+        return 0
+
+    logger.info("\n\n")  # blank line
     print_separator("#")
-    logger.info("Running tier '%s'" % tier.get_name())
+    logger.info("Running tier '%s'" % tier_name)
     print_separator("#")
     logger.debug("\n%s" % tier)
-    for test in tier.get_tests():
-        run_test(test)
+    for test in tests:
+        run_test(test, tier_name)
 
 
 def run_all(tiers):
+    global EXECUTED_TEST_CASES
     summary = ""
-    for tier in tiers.get_tiers():
-        summary += ("\n - %s. %s:\n\t %s"
-                    % (tier.get_order(),
-                       tier.get_name(),
-                       tier.get_test_names()))
+    BUILD_TAG = os.getenv('BUILD_TAG')
+    if BUILD_TAG is not None and re.search("daily", BUILD_TAG) is not None:
+        CI_LOOP = "daily"
+    else:
+        CI_LOOP = "weekly"
 
-    logger.info("Tiers to be executed:%s" % summary)
+    tiers_to_run = []
 
     for tier in tiers.get_tiers():
+        if (len(tier.get_tests()) != 0 and
+                re.search(CI_LOOP, tier.get_ci_loop()) is not None):
+            tiers_to_run.append(tier)
+            summary += ("\n - %s:\n\t %s"
+                        % (tier.get_name(),
+                           tier.get_test_names()))
+
+    logger.info("Tests to be executed:%s" % summary)
+    EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
+    for tier in tiers_to_run:
         run_tier(tier)
 
+    generate_report.main(EXECUTED_TEST_CASES)
+
 
 def main():
     global CLEAN_FLAG
@@ -139,7 +194,7 @@ def main():
         run_tier(_tiers.get_tier(args.test))
 
     elif _tiers.get_test(args.test):
-        run_test(_tiers.get_test(args.test))
+        run_test(_tiers.get_test(args.test), _tiers.get_tier(args.test))
 
     elif args.test == "all":
         run_all(_tiers)
@@ -153,7 +208,8 @@ def main():
     else:
         run_all(_tiers)
 
-    sys.exit(0)
+    logger.info("Execution exit value: %s" % OVERALL_RESULT)
+    sys.exit(OVERALL_RESULT)
 
 if __name__ == '__main__':
     main()