Merge "Integrate the netready package"
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
old mode 100755
new mode 100644
index 95353c8..b95e100
@@ -1,4 +1,4 @@
-#!/usr/bin/python -u
+#!/usr/bin/env python
 #
 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
 #
@@ -14,6 +14,7 @@ import importlib
 import logging
 import logging.config
 import os
+import pkg_resources
 import re
 import sys
 
@@ -22,8 +23,6 @@ import prettytable
 import functest.ci.tier_builder as tb
 import functest.core.testcase as testcase
 import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_clean as os_clean
-import functest.utils.openstack_snapshot as os_snapshot
 import functest.utils.openstack_utils as os_utils
 from functest.utils.constants import CONST
 
@@ -97,14 +96,6 @@ class Runner(object):
                 elif key == 'OS_PASSWORD':
                     CONST.__setattr__('OS_PASSWORD', value)
 
-    @staticmethod
-    def generate_os_snapshot():
-        os_snapshot.main()
-
-    @staticmethod
-    def cleanup():
-        os_clean.main()
-
     @staticmethod
     def get_run_dict(testname):
         try:
@@ -124,14 +115,11 @@ class Runner(object):
                 "The test case {} is not enabled".format(test.get_name()))
         logger.info("\n")  # blank line
         self.print_separator("=")
-        logger.info("Running test case '%s'..." % test.get_name())
+        logger.info("Running test case '%s'...", test.get_name())
         self.print_separator("=")
         logger.debug("\n%s" % test)
         self.source_rc_file()
 
-        if test.needs_clean() and self.clean_flag:
-            self.generate_os_snapshot()
-
         flags = " -t %s" % test.get_name()
         if self.report_flag:
             flags += " -r"
@@ -145,6 +133,9 @@ class Runner(object):
                 test_dict = ft_utils.get_dict_by_test(test.get_name())
                 test_case = cls(**test_dict)
                 self.executed_test_cases.append(test_case)
+                if self.clean_flag:
+                    if test_case.create_snapshot() != test_case.EX_OK:
+                        return result
                 try:
                     kwargs = run_dict['args']
                     result = test_case.run(**kwargs)
@@ -155,6 +146,8 @@ class Runner(object):
                         test_case.push_to_db()
                     result = test_case.is_successful()
                 logger.info("Test result:\n\n%s\n", test_case)
+                if self.clean_flag:
+                    test_case.clean()
             except ImportError:
                 logger.exception("Cannot import module {}".format(
                     run_dict['module']))
@@ -164,15 +157,7 @@ class Runner(object):
         else:
             raise Exception("Cannot import the class for the test case.")
 
-        if test.needs_clean() and self.clean_flag:
-            self.cleanup()
-        if result != testcase.TestCase.EX_OK:
-            logger.error("The test case '%s' failed. " % test.get_name())
-            self.overall_result = Result.EX_ERROR
-            if test.is_blocking():
-                raise BlockingTestFailed(
-                    "The test case {} failed and is blocking".format(
-                        test.get_name()))
+        return result
 
     def run_tier(self, tier):
         tier_name = tier.get_name()
@@ -187,7 +172,14 @@ class Runner(object):
         self.print_separator("#")
         logger.debug("\n%s" % tier)
         for test in tests:
-            self.run_test(test, tier_name)
+            result = self.run_test(test, tier_name)
+            if result != testcase.TestCase.EX_OK:
+                logger.error("The test case '%s' failed.", test.get_name())
+                self.overall_result = Result.EX_ERROR
+                if test.is_blocking():
+                    raise BlockingTestFailed(
+                        "The test case {} failed and is blocking".format(
+                            test.get_name()))
 
     def run_all(self, tiers):
         summary = ""
@@ -210,7 +202,7 @@ class Runner(object):
         _tiers = tb.TierBuilder(
             CONST.__getattribute__('INSTALLER_TYPE'),
             CONST.__getattribute__('DEPLOY_SCENARIO'),
-            CONST.__getattribute__("functest_testcases_yaml"))
+            pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
 
         if kwargs['noclean']:
             self.clean_flag = False
@@ -221,13 +213,18 @@ class Runner(object):
         try:
             if kwargs['test']:
                 self.source_rc_file()
-                logger.error(kwargs['test'])
+                logger.debug("Test args: %s", kwargs['test'])
                 if _tiers.get_tier(kwargs['test']):
                     self.run_tier(_tiers.get_tier(kwargs['test']))
                 elif _tiers.get_test(kwargs['test']):
-                    self.run_test(_tiers.get_test(kwargs['test']),
-                                  _tiers.get_tier_name(kwargs['test']),
-                                  kwargs['test'])
+                    result = self.run_test(
+                        _tiers.get_test(kwargs['test']),
+                        _tiers.get_tier_name(kwargs['test']),
+                        kwargs['test'])
+                    if result != testcase.TestCase.EX_OK:
+                        logger.error("The test case '%s' failed.",
+                                     kwargs['test'])
+                        self.overall_result = Result.EX_ERROR
                 elif kwargs['test'] == "all":
                     self.run_all(_tiers)
                 else:
@@ -255,25 +252,27 @@ class Runner(object):
             msg.add_row([env_var, CONST.__getattribute__(env_var)])
         logger.info("Deployment description: \n\n%s\n", msg)
 
-        msg = prettytable.PrettyTable(
-            header_style='upper', padding_width=5,
-            field_names=['test case', 'project', 'tier', 'duration', 'result'])
-        for test_case in self.executed_test_cases:
-            result = 'PASS' if(test_case.is_successful(
-                    ) == test_case.EX_OK) else 'FAIL'
-            msg.add_row([test_case.case_name, test_case.project_name,
-                         _tiers.get_tier_name(test_case.case_name),
-                         test_case.get_duration(), result])
-        logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+        if len(self.executed_test_cases) > 1:
+            msg = prettytable.PrettyTable(
+                header_style='upper', padding_width=5,
+                field_names=['test case', 'project', 'tier',
+                             'duration', 'result'])
+            for test_case in self.executed_test_cases:
+                result = 'PASS' if(test_case.is_successful(
+                        ) == test_case.EX_OK) else 'FAIL'
+                msg.add_row([test_case.case_name, test_case.project_name,
+                             _tiers.get_tier_name(test_case.case_name),
+                             test_case.get_duration(), result])
+            logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
 
         logger.info("Execution exit value: %s" % self.overall_result)
         return self.overall_result
 
 
-if __name__ == '__main__':
-    logging.config.fileConfig(
-        CONST.__getattribute__('dir_functest_logging_cfg'))
+def main():
+    logging.config.fileConfig(pkg_resources.resource_filename(
+        'functest', 'ci/logging.ini'))
     parser = RunTestsParser()
     args = parser.parse_args(sys.argv[1:])
     runner = Runner()
-    sys.exit(runner.main(**args).value)
+    return runner.main(**args).value
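
The commit drops the `if __name__ == '__main__'` block in favour of a `main()` function and resolves packaged data (ci/testcases.yaml, ci/logging.ini) through pkg_resources, so the script no longer depends on a hard-coded repository layout. Below is a minimal sketch of how such a `main()` is typically exposed as a console-script entry point; the setup.cfg stanza and the direct-execution shim are illustrative assumptions and are not part of this diff:

    # Hypothetical packaging glue (not shown in this diff): expose main() as a
    # console script so the runner works from any install location.
    #
    # setup.cfg (assumed excerpt):
    #   [entry_points]
    #   console_scripts =
    #       run_tests = functest.ci.run_tests:main
    #
    # Optional shim at the end of run_tests.py for direct execution; sys is
    # already imported by the module, and main() returns the integer value of
    # the overall Result, so it maps cleanly onto a process exit code.
    if __name__ == '__main__':
        sys.exit(main())

As a side note on the data-file lookup, pkg_resources.resource_filename('functest', 'ci/testcases.yaml') returns the on-disk path of the resource relative to the installed functest package, so the same call works for a pip install, a development checkout, or a container image.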