Run all daily testcases if CI_LOOP is unset
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index c8143f1..ff38720 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -7,46 +7,53 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 
+""" The entry of running tests:
+1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
+2) Execute the common operations on every testcase (run, push results to db...)
+3) Return the right status code
+"""
+
 import argparse
-import enum
 import importlib
 import logging
 import logging.config
 import os
-import pkg_resources
 import re
 import sys
 import textwrap
+import pkg_resources
 
+import enum
 import prettytable
+import yaml
 
 import functest.ci.tier_builder as tb
 import functest.core.testcase as testcase
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-from functest.utils.constants import CONST
-
-# __name__ cannot be used here
-logger = logging.getLogger('functest.ci.run_tests')
 
-CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
-    'functest', 'ci/config_functest.yaml')
+LOGGER = logging.getLogger('functest.ci.run_tests')
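+# Default path of the file providing the environment variables (e.g. the
+# OpenStack credentials), as typically mounted in the Functest containers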
+ENV_FILE = "/home/opnfv/functest/conf/env_file"
 
 
 class Result(enum.Enum):
+    """The overall result in enumerated type"""
+    # pylint: disable=too-few-public-methods
     EX_OK = os.EX_OK
     EX_ERROR = -1
 
 
 class BlockingTestFailed(Exception):
+    """Exception when the blocking test fails"""
     pass
 
 
 class TestNotEnabled(Exception):
+    """Exception when the test is not enabled"""
     pass
 
 
 class RunTestsParser(object):
+    """Parser to run tests"""
+    # pylint: disable=too-few-public-methods
 
     def __init__(self):
         self.parser = argparse.ArgumentParser()
@@ -62,60 +69,91 @@ class RunTestsParser(object):
                                  "database (default=false).",
                                  action="store_true")
 
-    def parse_args(self, argv=[]):
+    def parse_args(self, argv=None):
+        """Parse arguments.
+
+        It can call sys.exit if arguments are incorrect.
+
+        Returns:
+            the arguments from cmdline
+        """
         return vars(self.parser.parse_args(argv))
 
 
 class Runner(object):
+    """Runner class"""
 
     def __init__(self):
         self.executed_test_cases = {}
         self.overall_result = Result.EX_OK
         self.clean_flag = True
         self.report_flag = False
-        self._tiers = tb.TierBuilder(
-            CONST.__getattribute__('INSTALLER_TYPE'),
-            CONST.__getattribute__('DEPLOY_SCENARIO'),
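+        # TierBuilder parses testcases.yaml and filters the testcases
+        # against the given installer type and deploy scenario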
+        self.tiers = tb.TierBuilder(
+            os.environ.get('INSTALLER_TYPE', None),
+            os.environ.get('DEPLOY_SCENARIO', None),
             pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
 
     @staticmethod
-    def source_rc_file():
-        rc_file = CONST.__getattribute__('openstack_creds')
+    def source_envfile(rc_file=ENV_FILE):
+        """Source the env file passed as arg"""
         if not os.path.isfile(rc_file):
-            raise Exception("RC file %s does not exist..." % rc_file)
-        logger.debug("Sourcing the OpenStack RC file...")
-        os_utils.source_credentials(rc_file)
+            LOGGER.debug("No env file %s found", rc_file)
+            return
+        with open(rc_file, "r") as rcfd:
+            LOGGER.info("Sourcing env file %s", rc_file)
+            for line in rcfd:
+                var = (line.rstrip('"\n').replace('export ', '').split(
+                    "=") if re.search(r'(.*)=(.*)', line) else None)
+                # The next two lines should be modified as soon as rc_file
+                # conforms to common shell rules. Be aware that this could
+                # cause issues if a value starts with '
+                if var:
+                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
+                    value = re.sub(
+                        r'^["\' ]*|[ \'"]*$', '', "=".join(var[1:]))
+                    os.environ[key] = value
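+                    # e.g. a line such as (hypothetical value)
+                    #   export OS_AUTH_URL="http://192.168.0.1:5000/v3"
+                    # ends up as
+                    #   os.environ['OS_AUTH_URL'] = 'http://192.168.0.1:5000/v3'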
+
+    @staticmethod
+    def get_dict_by_test(testname):
+        """Return the testcase dict parsed from testcases.yaml"""
+        # pylint: disable=bad-continuation
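+        # testcases.yaml is expected to look like (illustrative snippet):
+        #   tiers:
+        #     - name: healthcheck
+        #       testcases:
+        #         - case_name: connection_check
+        #           ...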
+        with open(pkg_resources.resource_filename(
+                'functest', 'ci/testcases.yaml')) as tyaml:
+            testcases_yaml = yaml.safe_load(tyaml)
+        for dic_tier in testcases_yaml.get("tiers"):
+            for dic_testcase in dic_tier['testcases']:
+                if dic_testcase['case_name'] == testname:
+                    return dic_testcase
+        LOGGER.error('Testcase %s is not defined in testcases.yaml', testname)
+        return None
 
     @staticmethod
     def get_run_dict(testname):
+        """Obtain the 'run' block of the testcase from testcases.yaml"""
         try:
-            dict = ft_utils.get_dict_by_test(testname)
-            if not dict:
-                logger.error("Cannot get {}'s config options".format(testname))
-            elif 'run' in dict:
-                return dict['run']
+            dic_testcase = Runner.get_dict_by_test(testname)
+            if not dic_testcase:
+                LOGGER.error("Cannot get %s's config options", testname)
+            elif 'run' in dic_testcase:
+                return dic_testcase['run']
             return None
-        except Exception:
-            logger.exception("Cannot get {}'s config options".format(testname))
+        except Exception:  # pylint: disable=broad-except
+            LOGGER.exception("Cannot get %s's config options", testname)
             return None
 
     def run_test(self, test):
+        """Run one test case"""
         if not test.is_enabled():
             raise TestNotEnabled(
                 "The test case {} is not enabled".format(test.get_name()))
-        logger.info("Running test case '%s'...", test.get_name())
+        LOGGER.info("Running test case '%s'...", test.get_name())
         result = testcase.TestCase.EX_RUN_ERROR
         run_dict = self.get_run_dict(test.get_name())
         if run_dict:
             try:
                 module = importlib.import_module(run_dict['module'])
                 cls = getattr(module, run_dict['class'])
-                test_dict = ft_utils.get_dict_by_test(test.get_name())
+                test_dict = Runner.get_dict_by_test(test.get_name())
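+                # the whole testcase dict from testcases.yaml is forwarded
+                # as constructor kwargs (case_name, project_name, ...)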
                 test_case = cls(**test_dict)
                 self.executed_test_cases[test.get_name()] = test_case
-                if self.clean_flag:
-                    if test_case.create_snapshot() != test_case.EX_OK:
-                        return testcase.TestCase.EX_RUN_ERROR
                 try:
                     kwargs = run_dict['args']
                     test_case.run(**kwargs)
@@ -127,33 +165,32 @@ class Runner(object):
                     result = test_case.is_successful()
                 else:
                     result = testcase.TestCase.EX_OK
-                logger.info("Test result:\n\n%s\n", test_case)
+                LOGGER.info("Test result:\n\n%s\n", test_case)
                 if self.clean_flag:
                     test_case.clean()
             except ImportError:
-                logger.exception("Cannot import module {}".format(
-                    run_dict['module']))
+                LOGGER.exception("Cannot import module %s", run_dict['module'])
             except AttributeError:
-                logger.exception("Cannot get class {}".format(
-                    run_dict['class']))
+                LOGGER.exception("Cannot get class %s", run_dict['class'])
         else:
             raise Exception("Cannot import the class for the test case.")
         return result
 
     def run_tier(self, tier):
+        """Run one tier"""
         tier_name = tier.get_name()
         tests = tier.get_tests()
-        if tests is None or len(tests) == 0:
-            logger.info("There are no supported test cases in this tier "
+        if not tests:
+            LOGGER.info("There are no supported test cases in this tier "
                         "for the given scenario")
             self.overall_result = Result.EX_ERROR
         else:
-            logger.info("Running tier '%s'" % tier_name)
+            LOGGER.info("Running tier '%s'", tier_name)
             for test in tests:
                 self.run_test(test)
                 test_case = self.executed_test_cases[test.get_name()]
                 if test_case.is_successful() != testcase.TestCase.EX_OK:
-                    logger.error("The test case '%s' failed.", test.get_name())
+                    LOGGER.error("The test case '%s' failed.", test.get_name())
                     if test.get_project() == "functest":
                         self.overall_result = Result.EX_ERROR
                     if test.is_blocking():
@@ -163,100 +200,105 @@ class Runner(object):
         return self.overall_result
 
     def run_all(self):
+        """Run all available testcases"""
         tiers_to_run = []
         msg = prettytable.PrettyTable(
             header_style='upper', padding_width=5,
             field_names=['tiers', 'order', 'CI Loop', 'description',
                          'testcases'])
-        for tier in self._tiers.get_tiers():
-            if (len(tier.get_tests()) != 0 and
-                    re.search(CONST.__getattribute__('CI_LOOP'),
-                              tier.get_ci_loop()) is not None):
+        for tier in self.tiers.get_tiers():
+            ci_loop = os.environ.get('CI_LOOP', 'daily')
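+            # defaulting to 'daily' ensures all daily testcases are run
+            # when CI_LOOP is unset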
+            if (tier.get_tests() and
+                    re.search(ci_loop, tier.get_ci_loop()) is not None):
                 tiers_to_run.append(tier)
                 msg.add_row([tier.get_name(), tier.get_order(),
                              tier.get_ci_loop(),
                              textwrap.fill(tier.description, width=40),
                              textwrap.fill(' '.join([str(x.get_name(
                                  )) for x in tier.get_tests()]), width=40)])
-        logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
+        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
         for tier in tiers_to_run:
             self.run_tier(tier)
 
     def main(self, **kwargs):
+        """Entry point of class Runner"""
         if 'noclean' in kwargs:
             self.clean_flag = not kwargs['noclean']
         if 'report' in kwargs:
             self.report_flag = kwargs['report']
         try:
             if 'test' in kwargs:
-                self.source_rc_file()
-                logger.debug("Test args: %s", kwargs['test'])
-                if self._tiers.get_tier(kwargs['test']):
-                    self.run_tier(self._tiers.get_tier(kwargs['test']))
-                elif self._tiers.get_test(kwargs['test']):
+                LOGGER.debug("Sourcing the credential file...")
+                self.source_envfile()
+
+                LOGGER.debug("Test args: %s", kwargs['test'])
+                if self.tiers.get_tier(kwargs['test']):
+                    self.run_tier(self.tiers.get_tier(kwargs['test']))
+                elif self.tiers.get_test(kwargs['test']):
                     result = self.run_test(
-                        self._tiers.get_test(kwargs['test']))
+                        self.tiers.get_test(kwargs['test']))
                     if result != testcase.TestCase.EX_OK:
-                        logger.error("The test case '%s' failed.",
+                        LOGGER.error("The test case '%s' failed.",
                                      kwargs['test'])
                         self.overall_result = Result.EX_ERROR
                 elif kwargs['test'] == "all":
                     self.run_all()
                 else:
-                    logger.error("Unknown test case or tier '%s', "
-                                 "or not supported by "
-                                 "the given scenario '%s'."
-                                 % (kwargs['test'],
-                                    CONST.__getattribute__('DEPLOY_SCENARIO')))
-                    logger.debug("Available tiers are:\n\n%s",
-                                 self._tiers)
+                    LOGGER.error("Unknown test case or tier '%s', or not "
+                                 "supported by the given scenario '%s'.",
+                                 kwargs['test'],
+                                 os.environ.get('DEPLOY_SCENARIO', ""))
+                    LOGGER.debug("Available tiers are:\n\n%s",
+                                 self.tiers)
                     return Result.EX_ERROR
             else:
                 self.run_all()
         except BlockingTestFailed:
             pass
-        except Exception:
-            logger.exception("Failures when running testcase(s)")
+        except Exception:  # pylint: disable=broad-except
+            LOGGER.exception("Failures when running testcase(s)")
             self.overall_result = Result.EX_ERROR
-        if not self._tiers.get_test(kwargs['test']):
-            self.summary(self._tiers.get_tier(kwargs['test']))
-        logger.info("Execution exit value: %s" % self.overall_result)
+        if not self.tiers.get_test(kwargs['test']):
+            self.summary(self.tiers.get_tier(kwargs['test']))
+        LOGGER.info("Execution exit value: %s", self.overall_result)
         return self.overall_result
 
     def summary(self, tier=None):
+        """To generate functest report showing the overall results"""
         msg = prettytable.PrettyTable(
             header_style='upper', padding_width=5,
             field_names=['env var', 'value'])
         for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
                         'CI_LOOP']:
-            msg.add_row([env_var, CONST.__getattribute__(env_var)])
-        logger.info("Deployment description:\n\n%s\n", msg)
+            msg.add_row([env_var, os.environ.get(env_var, "")])
+        LOGGER.info("Deployment description:\n\n%s\n", msg)
         msg = prettytable.PrettyTable(
             header_style='upper', padding_width=5,
             field_names=['test case', 'project', 'tier',
                          'duration', 'result'])
-        tiers = [tier] if tier else self._tiers.get_tiers()
-        for tier in tiers:
-            for test in tier.get_tests():
+        tiers = [tier] if tier else self.tiers.get_tiers()
+        for each_tier in tiers:
+            for test in each_tier.get_tests():
                 try:
                     test_case = self.executed_test_cases[test.get_name()]
                 except KeyError:
                     msg.add_row([test.get_name(), test.get_project(),
-                                 tier.get_name(), "00:00", "SKIP"])
+                                 each_tier.get_name(), "00:00", "SKIP"])
                 else:
                     result = 'PASS' if (test_case.is_successful()
                                         == test_case.EX_OK) else 'FAIL'
                     msg.add_row(
                         [test_case.case_name, test_case.project_name,
-                         self._tiers.get_tier_name(test_case.case_name),
+                         self.tiers.get_tier_name(test_case.case_name),
                          test_case.get_duration(), result])
-            for test in tier.get_skipped_test():
+            for test in each_tier.get_skipped_test():
                 msg.add_row([test.get_name(), test.get_project(),
-                             tier.get_name(), "00:00", "SKIP"])
-        logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
+                             each_tier.get_name(), "00:00", "SKIP"])
+        LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
 
 
 def main():
+    """Entry point"""
     logging.config.fileConfig(pkg_resources.resource_filename(
         'functest', 'ci/logging.ini'))
     logging.captureWarnings(True)