X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=sdnvpn%2Ftest%2Ffunctest%2Ftempest.py;h=8928e7c8eea7d14142024467927338b9b95e766e;hb=c14d1a337c94c11d96190413869e41ace93ea2bc;hp=855ec1bb6c0e1d63f158a445667489b2833ed92f;hpb=89ce471643c0cbaf41cac0e6995e754b2169889b;p=sdnvpn.git

diff --git a/sdnvpn/test/functest/tempest.py b/sdnvpn/test/functest/tempest.py
index 855ec1b..8928e7c 100644
--- a/sdnvpn/test/functest/tempest.py
+++ b/sdnvpn/test/functest/tempest.py
@@ -14,24 +14,22 @@ import re
 import shutil
 
 import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-from sdnvpn.lib import config as sdnvpn_config
+import functest.opnfv_tests.openstack.tempest.conf_utils as tempest_utils
 
 logger = ft_logger.Logger("sdnvpn-tempest").getLogger()
 
-COMMON_CONFIG = sdnvpn_config.CommonConfig()
-
-SUCCESS_CRITERIA = ft_utils.get_parameter_from_yaml(
-    "testcases.testcase_1.succes_criteria", COMMON_CONFIG.config_file)
-
 
 def main():
-    src_tempest_dir = ft_utils.get_deployment_dir()
+    verifier_repo_dir = tempest_utils.get_verifier_repo_dir(None)
+    src_tempest_dir = tempest_utils.get_verifier_deployment_dir(None, None)
+
     if not src_tempest_dir:
         logger.error("Rally deployment not found.")
         exit(-1)
 
-    src_tempest_conf = src_tempest_dir + '/tempest.conf'
+    tempest_utils.configure_verifier(src_tempest_dir)
+
+    src_tempest_conf = os.path.join(src_tempest_dir, 'tempest.conf')
     bgpvpn_tempest_conf = src_tempest_dir + '/bgpvpn_tempest.conf'
 
     if not os.path.isfile(src_tempest_conf):
@@ -47,13 +45,14 @@ def main():
     with open(bgpvpn_tempest_conf, 'wb') as tempest_conf:
         config.write(tempest_conf)
 
-    cmd_line = (src_tempest_dir +
+    cmd_line = (verifier_repo_dir +
                 "/run_tempest.sh -C %s -t -N -- "
                 "networking_bgpvpn_tempest" % bgpvpn_tempest_conf)
     logger.info("Executing: %s" % cmd_line)
     cmd = os.popen(cmd_line)
     output = cmd.read()
     logger.debug(output)
+
     # Results parsing
     error_logs = ""
     duration = 0
@@ -78,9 +77,11 @@ def main():
         results = {"duration": duration,
                    "num_tests": num_tests, "failed": failed,
                    "tests": testcases}
-        status = "PASS"
-        if 100 - (100 * int(failed) / int(num_tests)) < int(SUCCESS_CRITERIA):
+        if int(failed) == 0:
+            status = "PASS"
+        else:
             status = "FAILED"
+
         return {"status": status, "details": results}
     except:
         logger.error("Problem when parsing the results.")
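
For context on the last hunk: the patch drops the percentage-based success criterion (SUCCESS_CRITERIA, previously read from the sdnvpn config file) and replaces it with a strict zero-failure check. The sketch below is not part of the patch; it only contrasts the two behaviours, and the argument values in the usage lines are made-up examples.

```python
# Minimal sketch (not part of the patch) of the pass/fail decision
# before and after this change. 'failed', 'num_tests' and
# 'success_criteria' are illustrative values, not real tempest output.

def old_status(failed, num_tests, success_criteria):
    """Pre-patch: PASS while the pass rate stays at or above the criteria."""
    status = "PASS"
    if 100 - (100 * int(failed) / int(num_tests)) < int(success_criteria):
        status = "FAILED"
    return status


def new_status(failed):
    """Post-patch: PASS only if no tempest test failed at all."""
    return "PASS" if int(failed) == 0 else "FAILED"


if __name__ == "__main__":
    print(old_status(failed=2, num_tests=100, success_criteria=90))  # PASS
    print(new_status(failed=2))                                      # FAILED
```

In other words, a run that previously passed with a few failed tests (as long as the pass rate stayed above the configured threshold) is marked FAILED after this patch as soon as any networking_bgpvpn_tempest test fails.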