Merge "rename tempest.conf to refstack_tempest.conf"
author Jose Lausuch <jose.lausuch@ericsson.com>
Tue, 2 May 2017 11:50:20 +0000 (11:50 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Tue, 2 May 2017 11:50:20 +0000 (11:50 +0000)
41 files changed:
functest/ci/run_tests.py
functest/ci/testcases.yaml
functest/ci/tier_handler.py
functest/core/feature.py
functest/core/pytest_suite_runner.py
functest/core/testcase.py
functest/core/vnf_base.py
functest/opnfv_tests/features/barometer.py
functest/opnfv_tests/openstack/rally/rally.py
functest/opnfv_tests/openstack/refstack_client/refstack_client.py
functest/opnfv_tests/openstack/tempest/tempest.py
functest/opnfv_tests/openstack/vping/vping_base.py
functest/opnfv_tests/sdn/odl/odl.py
functest/opnfv_tests/sdn/onos/onos.py
functest/opnfv_tests/vnf/aaa/aaa.py
functest/opnfv_tests/vnf/ims/cloudify_ims.py
functest/opnfv_tests/vnf/ims/opera_ims.py
functest/opnfv_tests/vnf/ims/orchestra_ims.py
functest/opnfv_tests/vnf/router/vyos_vrouter.py
functest/tests/unit/core/test_feature.py
functest/tests/unit/core/test_testcase.py
functest/tests/unit/core/test_vnf_base.py
functest/tests/unit/odl/test_odl.py
functest/tests/unit/openstack/__init__.py [moved from functest/tests/unit/opnfv_tests/__init__.py with 100% similarity]
functest/tests/unit/openstack/rally/__init__.py [moved from functest/tests/unit/opnfv_tests/openstack/__init__.py with 100% similarity]
functest/tests/unit/openstack/rally/test_rally.py [moved from functest/tests/unit/opnfv_tests/openstack/rally/test_rally.py with 100% similarity]
functest/tests/unit/openstack/refstack_client/__init__.py [moved from functest/tests/unit/opnfv_tests/openstack/rally/__init__.py with 100% similarity]
functest/tests/unit/openstack/refstack_client/test_refstack_client.py [moved from functest/tests/unit/opnfv_tests/openstack/refstack_client/test_refstack_client.py with 100% similarity]
functest/tests/unit/openstack/tempest/__init__.py [moved from functest/tests/unit/opnfv_tests/openstack/refstack_client/__init__.py with 100% similarity]
functest/tests/unit/openstack/tempest/test_conf_utils.py [moved from functest/tests/unit/opnfv_tests/openstack/tempest/test_conf_utils.py with 100% similarity]
functest/tests/unit/openstack/tempest/test_tempest.py [moved from functest/tests/unit/opnfv_tests/openstack/tempest/test_tempest.py with 62% similarity]
functest/tests/unit/opnfv_tests/vnf/__init__.py [deleted file]
functest/tests/unit/opnfv_tests/vnf/ims/__init__.py [deleted file]
functest/tests/unit/utils/test_functest_utils.py
functest/tests/unit/vnf/ims/__init__.py [moved from functest/tests/unit/opnfv_tests/openstack/tempest/__init__.py with 100% similarity]
functest/tests/unit/vnf/ims/test_clearwater.py [moved from functest/tests/unit/opnfv_tests/vnf/ims/test_clearwater.py with 100% similarity]
functest/tests/unit/vnf/ims/test_cloudify_ims.py [moved from functest/tests/unit/opnfv_tests/vnf/ims/test_cloudify_ims.py with 100% similarity]
functest/tests/unit/vnf/ims/test_ims_base.py [moved from functest/tests/unit/opnfv_tests/vnf/ims/test_ims_base.py with 100% similarity]
functest/tests/unit/vnf/ims/test_orchestrator_cloudify.py [moved from functest/tests/unit/opnfv_tests/vnf/ims/test_orchestrator_cloudify.py with 100% similarity]
functest/utils/functest_logger.py
functest/utils/functest_utils.py

index 0ca73f3..e68901b 100755 (executable)
@@ -155,7 +155,7 @@ def run_test(test, tier_name, testcases=None):
             if result == testcase.TestCase.EX_OK:
                 if GlobalVariables.REPORT_FLAG:
                     test_case.push_to_db()
-                result = test_case.check_criteria()
+                result = test_case.check_result()
         except ImportError:
             logger.exception("Cannot import module {}".format(
                 run_dict['module']))
index 3998830..8f2cc4b 100644 (file)
@@ -10,7 +10,7 @@ tiers:
             -
                 case_name: connection_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -30,7 +30,7 @@ tiers:
             -
                 case_name: api_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -49,7 +49,7 @@ tiers:
             -
                 case_name: snaps_health_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -73,7 +73,7 @@ tiers:
             -
                 case_name: vping_ssh
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -90,7 +90,7 @@ tiers:
             -
                 case_name: vping_userdata
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -106,7 +106,7 @@ tiers:
             -
                 case_name: tempest_smoke_serial
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -124,7 +124,7 @@ tiers:
             -
                 case_name: rally_sanity
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -140,7 +140,7 @@ tiers:
             -
                 case_name: refstack_defcore
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -156,7 +156,7 @@ tiers:
             -
                 case_name: odl
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -177,7 +177,7 @@ tiers:
             -
                 case_name: odl_netvirt
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -200,7 +200,7 @@ tiers:
             -
                 case_name: fds
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -220,7 +220,7 @@ tiers:
             -
                 case_name: onos
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -237,7 +237,7 @@ tiers:
             -
                 case_name: snaps_smoke
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -267,7 +267,7 @@ tiers:
             -
                 case_name: promise
                 project_name: promise
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -284,7 +284,7 @@ tiers:
             -
                 case_name: doctor-notification
                 project_name: doctor
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -301,7 +301,7 @@ tiers:
             -
                 case_name: bgpvpn
                 project_name: sdnvpn
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -318,7 +318,7 @@ tiers:
             -
                 case_name: security_scan
                 project_name: securityscanning
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -335,7 +335,7 @@ tiers:
             -
                 case_name: copper
                 project_name: copper
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -352,7 +352,7 @@ tiers:
             -
                 case_name: multisite
                 project_name: multisite
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -366,7 +366,7 @@ tiers:
             -
                 case_name: functest-odl-sfc
                 project_name: sfc
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -382,7 +382,7 @@ tiers:
             -
                 case_name: onos_sfc
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -396,7 +396,7 @@ tiers:
             -
                 case_name: parser-basics
                 project_name: parser
-                criteria: 'ret == 0'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -412,7 +412,7 @@ tiers:
             -
                 case_name: domino-multinode
                 project_name: domino
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -428,7 +428,7 @@ tiers:
             -
                 case_name: gluon_vping
                 project_name: netready
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -444,7 +444,7 @@ tiers:
             -
                 case_name: barometercollectd
                 project_name: barometer
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -468,7 +468,7 @@ tiers:
             -
                 case_name: tempest_full_parallel
                 project_name: functest
-                criteria: 'success_rate >= 80%'
+                criteria: 80
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -484,7 +484,7 @@ tiers:
             -
                 case_name: tempest_custom
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -502,7 +502,7 @@ tiers:
             -
                 case_name: rally_full
                 project_name: functest
-                criteria: 'success_rate >= 90%'
+                criteria: 90
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -525,7 +525,7 @@ tiers:
             -
                 case_name: cloudify_ims
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -540,7 +540,7 @@ tiers:
 #          -
 #               case_name: aaa
 #               project_name: functest
-#               criteria: 'ret == 0'
+#               criteria: 100
 #               blocking: false
 #               clean_flag: true
 #               description: >-
@@ -554,7 +554,7 @@ tiers:
             -
                 case_name: orchestra_ims
                 project_name: functest
-                criteria: 'ret == 0'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -569,7 +569,7 @@ tiers:
             -
                 case_name: opera-vims
                 project_name: opera
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -584,7 +584,7 @@ tiers:
             -
                 case_name: vyos_vrouter
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
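
Every criteria entry in testcases.yaml is now a plain integer (the minimum
success percentage) instead of an expression string. A minimal sketch of what
consumers of the file now see, assuming PyYAML is available (the entry below
is illustrative, mirroring the schema above):

    import yaml

    entry = yaml.safe_load("""
    case_name: tempest_full_parallel
    project_name: functest
    criteria: 80
    """)
    # The tier handler forwards this value to TestCase(**kwargs), where
    # check_result() later compares result >= criteria.
    assert isinstance(entry['criteria'], int)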
index 6b4864b..fe7372a 100644 (file)
@@ -158,7 +158,7 @@ class TestCase(object):
         for line in lines:
             out += ("|    " + line.ljust(LINE_LENGTH - 7) + " |\n")
         out += ("| Criteria:  " +
-                self.criteria.ljust(LINE_LENGTH - 14) + "|\n")
+                str(self.criteria).ljust(LINE_LENGTH - 14) + "|\n")
         out += ("| Dependencies:".ljust(LINE_LENGTH - 1) + "|\n")
         installer = self.dependency.get_installer()
         scenario = self.dependency.get_scenario()
index 5f8a087..d65f5a3 100644 (file)
@@ -59,7 +59,7 @@ class Feature(base.TestCase):
         It sets the following attributes required to push the results
         to DB:
 
-            * criteria,
+            * result,
             * start_time,
             * stop_time.
 
@@ -74,15 +74,15 @@ class Feature(base.TestCase):
         """
         self.start_time = time.time()
         exit_code = base.TestCase.EX_RUN_ERROR
-        self.criteria = "FAIL"
+        self.result = 0
         try:
             if self.execute(**kwargs) == 0:
                 exit_code = base.TestCase.EX_OK
-                self.criteria = 'PASS'
+                self.result = 100
             ft_utils.logger_test_results(
                 self.project_name, self.case_name,
-                self.criteria, self.details)
-            self.logger.info("%s %s", self.project_name, self.criteria)
+                self.result, self.details)
+            self.logger.info("%s %s", self.project_name, self.result)
         except Exception:  # pylint: disable=broad-except
             self.logger.exception("%s FAILED", self.project_name)
         self.logger.info("Test result is stored in '%s'", self.result_file)
index 775f0a6..8b5da05 100644 (file)
@@ -48,10 +48,10 @@ class PyTestSuiteRunner(base.TestCase):
         if ((result.errors and len(result.errors) > 0)
                 or (result.failures and len(result.failures) > 0)):
             self.logger.info("%s FAILED" % self.case_name)
-            self.criteria = 'FAIL'
+            self.result = 'FAIL'
         else:
             self.logger.info("%s OK" % self.case_name)
-            self.criteria = 'PASS'
+            self.result = 'PASS'
 
         self.details = {}
         return exit_code
index 309842e..3f191b4 100644 (file)
@@ -38,26 +38,37 @@ class TestCase(object):
         self.details = {}
         self.project_name = kwargs.get('project_name', 'functest')
         self.case_name = kwargs.get('case_name', '')
-        self.criteria = ""
+        self.criteria = kwargs.get('criteria', 100)
+        self.result = ""
         self.start_time = ""
         self.stop_time = ""
 
-    def check_criteria(self):
-        """Interpret the results of the test case.
+    def check_result(self):
+        """Interpret the result of the test case.
 
-        It allows getting the results of TestCase. It completes run()
+        It allows getting the result of TestCase. It completes run()
         which only returns the execution status.
 
-        It can be overriden if checking criteria is not suitable.
+        It can be overridden if checking the result is not suitable.
 
         Returns:
-            TestCase.EX_OK if criteria is 'PASS'.
+            TestCase.EX_OK if result is 'PASS'.
             TestCase.EX_TESTCASE_FAILED otherwise.
         """
         try:
             assert self.criteria
-            if self.criteria == 'PASS':
-                return TestCase.EX_OK
+            if isinstance(self.result, int) and isinstance(self.criteria, int):
+                if self.result >= self.criteria:
+                    return TestCase.EX_OK
+            else:
+                # Backward compatibility
+                # It must be removed as soon as TestCase subclasses
+                # stop setting result = 'PASS' or 'FAIL'.
+                # In this case criteria is unread.
+                self.logger.warning(
+                    "Please update result which must be an int!")
+                if self.result == 'PASS':
+                    return TestCase.EX_OK
         except AssertionError:
             self.logger.error("Please run test before checking the results")
         return TestCase.EX_TESTCASE_FAILED
@@ -74,7 +85,7 @@ class TestCase(object):
         The new implementation must set the following attributes to
         push the results to DB:
 
-            * criteria,
+            * result,
             * start_time,
             * stop_time.
 
@@ -99,7 +110,7 @@ class TestCase(object):
 
             * project_name,
             * case_name,
-            * criteria,
+            * result,
             * start_time,
             * stop_time.
 
@@ -110,12 +121,13 @@ class TestCase(object):
         try:
             assert self.project_name
             assert self.case_name
-            assert self.criteria
             assert self.start_time
             assert self.stop_time
+            pub_result = 'PASS' if self.check_result(
+                ) == TestCase.EX_OK else 'FAIL'
             if ft_utils.push_results_to_db(
                     self.project_name, self.case_name, self.start_time,
-                    self.stop_time, self.criteria, self.details):
+                    self.stop_time, pub_result, self.details):
                 self.logger.info("The results were successfully pushed to DB")
                 return TestCase.EX_OK
             else:
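
The new contract can be exercised directly: run() still returns an execution
status, while the score goes into result as an integer percentage that
check_result() compares against criteria. A minimal sketch, assuming functest
is importable (DummyTest is hypothetical):

    from functest.core import testcase

    class DummyTest(testcase.TestCase):
        """Hypothetical subclass illustrating the new int-based contract."""
        def run(self, **kwargs):
            self.start_time = 1
            self.stop_time = 2
            self.result = 100  # integer percentage, not 'PASS'/'FAIL'
            return testcase.TestCase.EX_OK

    test = DummyTest(case_name='dummy', criteria=80)
    test.run()
    assert test.check_result() == testcase.TestCase.EX_OK  # 100 >= 80

push_to_db() now derives the published 'PASS'/'FAIL' string from the same
check, so the payload sent to the results DB stays backward compatible.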
index 2de28c1..fe4e427 100644 (file)
@@ -196,19 +196,19 @@ class VnfOnBoardingBase(base.TestCase):
 
     def parse_results(self):
         exit_code = self.EX_OK
-        self.criteria = "PASS"
+        self.result = "PASS"
         self.logger.info(self.details)
         # The 2 VNF steps must be OK to get a PASS result
         if (self.details['vnf']['status'] is not "PASS" or
                 self.details['test_vnf']['status'] is not "PASS"):
             exit_code = self.EX_RUN_ERROR
-            self.criteria = "FAIL"
+            self.result = "FAIL"
         return exit_code
 
     def log_results(self):
         ft_utils.logger_test_results(self.project_name,
                                      self.case_name,
-                                     self.criteria,
+                                     self.result,
                                      self.details)
 
     def step_failure(self, error_msg):
index b42801d..8a40940 100644 (file)
@@ -16,8 +16,5 @@ class BarometerCollectd(base.Feature):
     Class for executing barometercollectd testcase.
     '''
 
-    def __init__(self, **kwargs):
-        super(BarometerCollectd, self).__init__(**kwargs)
-
     def execute(self):
         return collectd.main(self.logger)
index ad8745c..e07e2a8 100644 (file)
@@ -500,12 +500,12 @@ class RallyBase(testcase.TestCase):
                                     'nb tests': total_nb_tests,
                                     'nb success': success_rate}})
 
-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)
         self.details = payload
 
         logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))
 
     def _clean_up(self):
         if self.volume_type:
index 01ddf16..2f2fc00 100755 (executable)
@@ -133,10 +133,10 @@ class RefstackClient(testcase.TestCase):
         except Exception:
             success_rate = 0
 
-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)
         logger.info("Testcase %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))
 
     def run(self):
         '''used for functest command line,
index 8b175c2..e6c6b44 100644 (file)
@@ -200,19 +200,17 @@ class TempestCommon(testcase.TestCase):
         except Exception:
             success_rate = 0
 
-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)
         logger.info("Tempest %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))
 
     def run(self):
 
         self.start_time = time.time()
-
-        if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
-            os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
-
         try:
+            if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+                os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
             image_and_flavor = conf_utils.create_tempest_resources()
             conf_utils.configure_tempest(
                 self.DEPLOYMENT_DIR,
index 8f7cc1d..8bf263e 100644 (file)
@@ -273,7 +273,7 @@ class VPingBase(testcase.TestCase):
         self.details = {'timestart': start_time,
                         'duration': duration,
                         'status': test_status}
-        self.criteria = test_status
+        self.result = test_status
 
     @staticmethod
     def pMsg(msg):
index acd004b..6f4acf6 100755 (executable)
@@ -100,7 +100,7 @@ class ODLTests(testcase.TestCase):
         result = robot.api.ExecutionResult(xml_file)
         visitor = ODLResultVisitor()
         result.visit(visitor)
-        self.criteria = result.suite.status
+        self.result = result.suite.status
         self.start_time = timestamp_to_secs(result.suite.starttime)
         self.stop_time = timestamp_to_secs(result.suite.endtime)
         self.details = {}
index fe496c1..4d489d6 100644 (file)
@@ -21,16 +21,21 @@ import functest.utils.functest_utils as ft_utils
 import functest.utils.openstack_utils as openstack_utils
 
 
-logger = ft_logger.Logger(__name__).getLogger()
-
-
 class OnosBase(testcase.TestCase):
-    onos_repo_path = CONST.dir_repo_onos
-    onos_sfc_image_name = CONST.onos_sfc_image_name
-    onos_sfc_image_path = os.path.join(CONST.dir_functest_data,
-                                       CONST.onos_sfc_image_file_name)
-    onos_sfc_path = os.path.join(CONST.dir_repo_functest,
-                                 CONST.dir_onos_sfc)
+    onos_repo_path = CONST.__getattribute__('dir_repo_onos')
+    onos_sfc_image_name = CONST.__getattribute__('onos_sfc_image_name')
+    onos_sfc_image_path = os.path.join(
+        CONST.__getattribute__('dir_functest_data'),
+        CONST.__getattribute__('onos_sfc_image_file_name'))
+    onos_sfc_path = os.path.join(CONST.__getattribute__('dir_repo_functest'),
+                                 CONST.__getattribute__('dir_onos_sfc'))
+    installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+    logger = ft_logger.Logger(__name__).getLogger()
+
+    def __init__(self, **kwargs):
+        if "case_name" not in kwargs:
+            kwargs["case_name"] = "onos_base"
+        super(OnosBase, self).__init__(**kwargs)
 
     def run(self):
         self.start_time = time.time()
@@ -38,7 +43,7 @@ class OnosBase(testcase.TestCase):
             self._run()
             res = testcase.TestCase.EX_OK
         except Exception as e:
-            logger.error('Error with run: %s', e)
+            self.logger.error('Error with run: %s', e)
             res = testcase.TestCase.EX_RUN_ERROR
 
         self.stop_time = time.time()
@@ -56,20 +61,20 @@ class Onos(OnosBase):
         self.log_path = os.path.join(self.onos_repo_path, 'TestON/logs')
 
     def set_onos_ip(self):
-        if (CONST.INSTALLER_TYPE and
-                CONST.INSTALLER_TYPE.lower() == 'joid'):
+        if (self.installer_type and
+                self.installer_type.lower() == 'joid'):
             sdn_controller_env = os.getenv('SDN_CONTROLLER')
             OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", sdn_controller_env).group()
         else:
             neutron_url = openstack_utils.get_endpoint(service_type='network')
             OC1 = urlparse.urlparse(neutron_url).hostname
         os.environ['OC1'] = OC1
-        logger.debug("ONOS IP is %s" % OC1)
+        self.logger.debug("ONOS IP is %s", OC1)
 
     def run_onos_script(self, testname):
         cli_dir = os.path.join(self.onos_repo_path, 'TestON/bin/cli.py')
         cmd = '{0} run {1}'.format(cli_dir, testname)
-        logger.debug("Run script: %s" % testname)
+        self.logger.debug("Run script: %s", testname)
         ft_utils.execute_command_raise(
             cmd,
             error_msg=('Error when running ONOS script: %s'
@@ -84,8 +89,8 @@ class Onos(OnosBase):
                 elif os.path.isfile(log):
                     os.remove(log)
             except OSError as e:
-                logger.error('Error with deleting file %s: %s',
-                             log, e.strerror)
+                self.logger.error('Error with deleting file %s: %s',
+                                  log, e.strerror)
 
     def get_result(self):
         cmd = 'grep -rnh Fail {0}'.format(self.log_path)
@@ -95,9 +100,9 @@ class Onos(OnosBase):
                              stderr=subprocess.STDOUT)
 
         for line in p.stdout:
-            logger.debug(line)
+            self.logger.debug(line)
             if re.search("\s+[1-9]+\s+", line):
-                logger.debug("Testcase Fails\n" + line)
+                self.logger.debug("Testcase Fails\n" + line)
 
         cmd = "grep -rnh 'Execution Time' {0}".format(self.log_path)
         result_buffer = os.popen(cmd).read()
@@ -155,10 +160,10 @@ class Onos(OnosBase):
             if (result['FUNCvirNet']['result'] == "Success" and
                     result['FUNCvirNetL3']['result'] == "Success"):
                 status = "PASS"
-        except:
-            logger.error("Unable to set ONOS criteria")
+        except Exception:
+            self.logger.error("Unable to set ONOS result")
 
-        self.criteria = status
+        self.result = status
         self.details = result
 
     def _run(self):
@@ -170,13 +175,14 @@ class Onos(OnosBase):
 
 
 class OnosSfc(OnosBase):
-    def __init__(self):
-        super(OnosSfc, self).__init__()
-        self.case_name = 'onos_sfc'
+    def __init__(self, **kwargs):
+        if "case_name" not in kwargs:
+            kwargs["case_name"] = "onos_sfc"
+        super(OnosSfc, self).__init__(**kwargs)
 
-    def get_ip(type):
+    def get_ip(self, type):
         url = openstack_utils.get_endpoint(service_type=type)
-        logger.debug('get_ip for %s: %s' % (type, url))
+        self.logger.debug('get_ip for %s: %s', type, url)
         return urlparse.urlparse(url).hostname
 
     def update_sfc_onos_file(self, before, after):
@@ -190,6 +196,7 @@ class OnosSfc(OnosBase):
                        % (before, after)))
 
     def create_image(self):
+        self.logger.warn('inside create_image')
         glance_client = openstack_utils.get_glance_client()
         image_id = openstack_utils.create_glance_image(
             glance_client,
@@ -198,19 +205,20 @@ class OnosSfc(OnosBase):
         if image_id is None:
             raise Exception('Failed to create image')
 
-        logger.debug("Image '%s' with ID=%s is created successfully."
-                     % (self.onos_sfc_image_name, image_id))
+        self.logger.debug("Image '%s' with ID=%s is created successfully.",
+                          self.onos_sfc_image_name, image_id)
 
     def set_sfc_conf(self):
         self.update_sfc_onos_file("keystone_ip", self.get_ip("keystone"))
         self.update_sfc_onos_file("neutron_ip", self.get_ip("neutron"))
         self.update_sfc_onos_file("nova_ip", self.get_ip("nova"))
         self.update_sfc_onos_file("glance_ip", self.get_ip("glance"))
-        self.update_sfc_onos_file("console", CONST.OS_PASSWORD)
+        self.update_sfc_onos_file("console",
+                                  CONST.__getattribute__('OS_PASSWORD'))
         neutron_client = openstack_utils.get_neutron_client()
         ext_net = openstack_utils.get_external_net(neutron_client)
         self.update_sfc_onos_file("admin_floating_net", ext_net)
-        logger.debug("SFC configuration is modified")
+        self.logger.debug("SFC configuration is modified")
 
     def sfc_test(self):
         cmd = 'python {0}'.format(os.path.join(self.onos_sfc_path, 'sfc.py'))
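
A note on the CONST changes above: CONST is populated dynamically, so direct
attribute access such as CONST.dir_repo_onos can trip static analysers even
though it works at runtime. The commit does not state its motivation, but the
two spellings are equivalent:

    # Equivalent runtime lookups on the dynamically populated CONST object;
    # the string-key form is assumed here to be a lint workaround.
    value = CONST.__getattribute__('dir_repo_onos')
    assert value == getattr(CONST, 'dir_repo_onos')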
index 6de65bc..9c94cfb 100755 (executable)
@@ -49,7 +49,7 @@ class AaaVnf(vnf_base.VnfOnBoardingBase):
     def main(self, **kwargs):
         self.logger.info("AAA VNF onboarding")
         self.execute()
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
index e351e0d..0e6d479 100644 (file)
@@ -277,7 +277,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
         self.logger.info("Cloudify IMS VNF onboarding test starting")
         self.execute()
         self.logger.info("Cloudify IMS VNF onboarding test executed")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
index 7ca96ae..a46f9d7 100644 (file)
@@ -119,7 +119,7 @@ class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase):
         self.logger.info("Start to run Opera vIMS VNF onboarding test")
         self.execute()
         self.logger.info("Opera vIMS VNF onboarding test finished")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
index c95a17e..351c5fb 100755 (executable)
@@ -484,7 +484,7 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
         self.logger.info("Orchestra IMS VNF onboarding test starting")
         self.execute()
         self.logger.info("Orchestra IMS VNF onboarding test executed")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
index e6d2284..5654278 100644 (file)
@@ -29,6 +29,6 @@ class VrouterVnf(base.Feature):
             f.close()
 
     def log_results(self):
-        if self.criteria == 'PASS':
+        if self.result == 'PASS':
             self.set_result_details()
         super(VrouterVnf, self).log_results()
index bd7197f..993da5a 100644 (file)
@@ -35,9 +35,9 @@ class FeatureTestingBase(unittest.TestCase):
     def _test_run(self, status, mock_method=None):
         self.assertEqual(self.feature.run(cmd=self._cmd), status)
         if status == testcase.TestCase.EX_OK:
-            self.assertEqual(self.feature.criteria, 'PASS')
+            self.assertEqual(self.feature.result, 100)
         else:
-            self.assertEqual(self.feature.criteria, 'FAIL')
+            self.assertEqual(self.feature.result, 0)
         mock_method.assert_has_calls([mock.call(), mock.call()])
         self.assertEqual(self.feature.start_time, 1)
         self.assertEqual(self.feature.stop_time, 2)
index 4f3b25c..cc8446d 100644 (file)
@@ -28,13 +28,14 @@ class TestCaseTesting(unittest.TestCase):
 
     _case_name = "base"
     _project_name = "functest"
+    _published_result = "PASS"
 
     def setUp(self):
         self.test = testcase.TestCase(case_name=self._case_name,
                                       project_name=self._project_name)
         self.test.start_time = "1"
         self.test.stop_time = "2"
-        self.test.criteria = "PASS"
+        self.test.result = 100
         self.test.details = {"Hello": "World"}
 
     def test_run_unimplemented(self):
@@ -56,10 +57,6 @@ class TestCaseTesting(unittest.TestCase):
         self.test.case_name = None
         self._test_missing_attribute()
 
-    def test_missing_criteria(self):
-        self.test.criteria = None
-        self._test_missing_attribute()
-
     def test_missing_start_time(self):
         self.test.start_time = None
         self._test_missing_attribute()
@@ -76,7 +73,7 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)
 
     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=False)
@@ -85,7 +82,7 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_PUSH_TO_DB_ERROR)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)
 
     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=True)
@@ -94,23 +91,77 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)
+
+    @mock.patch('functest.utils.functest_utils.push_results_to_db',
+                return_value=True)
+    def test_push_to_db_res_ko(self, mock_function=None):
+        self.test.result = 0
+        self.assertEqual(self.test.push_to_db(),
+                         testcase.TestCase.EX_OK)
+        mock_function.assert_called_once_with(
+            self._project_name, self._case_name, self.test.start_time,
+            self.test.stop_time, 'FAIL', self.test.details)
+
+    @mock.patch('functest.utils.functest_utils.push_results_to_db',
+                return_value=True)
+    def test_push_to_db_both_ko(self, mock_function=None):
+        self.test.result = 0
+        self.test.criteria = 0
+        self.assertEqual(self.test.push_to_db(),
+                         testcase.TestCase.EX_OK)
+        mock_function.assert_called_once_with(
+            self._project_name, self._case_name, self.test.start_time,
+            self.test.stop_time, 'FAIL', self.test.details)
 
     def test_check_criteria_missing(self):
         self.test.criteria = None
-        self.assertEqual(self.test.check_criteria(),
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_TESTCASE_FAILED)
 
-    def test_check_criteria_failed(self):
-        self.test.criteria = 'FAILED'
-        self.assertEqual(self.test.check_criteria(),
+    def test_check_result_missing(self):
+        self.test.result = None
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_TESTCASE_FAILED)
 
-    def test_check_criteria_pass(self):
-        self.test.criteria = 'PASS'
-        self.assertEqual(self.test.check_criteria(),
+    def test_check_result_failed(self):
+        # Backward compatibility
+        # It must be removed as soon as TestCase subclasses
+        # stop setting result = 'PASS' or 'FAIL'.
+        self.test.result = 'FAIL'
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)
+
+    def test_check_result_pass(self):
+        # Backward compatibility
+        # It must be removed as soon as TestCase subclasses
+        # stop setting result = 'PASS' or 'FAIL'.
+        self.test.result = 'PASS'
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_OK)
 
+    def test_check_result_lt(self):
+        self.test.result = 50
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)
+
+    def test_check_result_eq(self):
+        self.test.result = 100
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_OK)
+
+    def test_check_result_gt(self):
+        self.test.criteria = 50
+        self.test.result = 100
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_OK)
+
+    def test_check_result_zero(self):
+        self.test.criteria = 0
+        self.test.result = 0
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
index 9670604..540cf61 100644 (file)
@@ -23,7 +23,7 @@ class VnfBaseTesting(unittest.TestCase):
         self.test.project = "functest"
         self.test.start_time = "1"
         self.test.stop_time = "5"
-        self.test.criteria = ""
+        self.test.result = ""
         self.test.details = {"orchestrator": {"status": "PASS",
                                               "result": "",
                                               "duration": 20},
index 55e100d..8046934 100644 (file)
@@ -123,7 +123,7 @@ class ODLParseResultTesting(ODLTesting):
         with mock.patch('robot.api.ExecutionResult',
                         return_value=mock.Mock(suite=suite)):
             self.test.parse_results()
-            self.assertEqual(self.test.criteria, config['status'])
+            self.assertEqual(self.test.result, config['status'])
             self.assertEqual(self.test.start_time,
                              timestamp_to_secs(config['starttime']))
             self.assertEqual(self.test.stop_time,
diff --git a/functest/tests/unit/opnfv_tests/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -105,7 +105,7 @@ class OSTempestTesting(unittest.TestCase):
         self._test_generate_test_list_mode_default('full')
 
     def test_parse_verifier_result_missing_verification_uuid(self):
-        self.tempestcommon.VERIFICATION_ID = ''
+        self.tempestcommon.VERIFICATION_ID = None
         with self.assertRaises(Exception):
             self.tempestcommon.parse_verifier_result()
 
@@ -169,100 +169,82 @@ class OSTempestTesting(unittest.TestCase):
             self.tempestcommon.parse_verifier_result()
             mock_method.assert_any_call('test_case_name', 100)
 
-    def test_run_missing_create_tempest_dir(self):
-        ret = testcase.TestCase.EX_RUN_ERROR
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'os.path.exists', return_value=False), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'os.makedirs') as mock_os_makedirs, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.create_tempest_resources',
-                       return_value="image_and_flavor"):
-            self.assertEqual(self.tempestcommon.run(),
-                             ret)
-            self.assertTrue(mock_os_makedirs.called)
-
-    def test_run_missing_configure_tempest(self):
-        ret = testcase.TestCase.EX_RUN_ERROR
-        ret_ok = testcase.TestCase.EX_OK
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'os.path.exists', return_value=False), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'os.makedirs') as mock_os_makedirs, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.create_tempest_resources',
-                       return_value=ret_ok), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.configure_tempest',
-                       return_value=ret):
-            self.assertEqual(self.tempestcommon.run(),
-                             ret)
-            self.assertTrue(mock_os_makedirs.called)
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'os.path.exists', return_value=False)
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs',
+                side_effect=Exception)
+    def test_run_makedirs_ko(self, *args):
+        self.assertEqual(self.tempestcommon.run(),
+                         testcase.TestCase.EX_RUN_ERROR)
+
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'os.path.exists', return_value=False)
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'conf_utils.create_tempest_resources', side_effect=Exception)
+    def test_run_create_tempest_resources_ko(self, *args):
+        self.assertEqual(self.tempestcommon.run(),
+                         testcase.TestCase.EX_RUN_ERROR)
+
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'os.path.exists', return_value=False)
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'conf_utils.create_tempest_resources', return_value={})
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'conf_utils.configure_tempest', side_effect=Exception)
+    def test_run_configure_tempest_ko(self, *args):
+        self.assertEqual(self.tempestcommon.run(),
+                         testcase.TestCase.EX_RUN_ERROR)
+
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'os.path.exists', return_value=False)
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'conf_utils.create_tempest_resources', return_value={})
+    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
+                'conf_utils.configure_tempest')
+    def _test_run(self, status, *args):
+        self.assertEqual(self.tempestcommon.run(), status)
 
     def test_run_missing_generate_test_list(self):
-        ret = testcase.TestCase.EX_RUN_ERROR
-        ret_ok = testcase.TestCase.EX_OK
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'os.path.exists', return_value=False), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'os.makedirs') as mock_os_makedirs, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.create_tempest_resources',
-                       return_value=ret_ok), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.configure_tempest',
-                       return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'generate_test_list',
-                              return_value=ret):
-            self.assertEqual(self.tempestcommon.run(),
-                             ret)
-            self.assertTrue(mock_os_makedirs.called)
-
-    def test_run_missing_apply_tempest_blacklist(self):
-        ret = testcase.TestCase.EX_RUN_ERROR
-        ret_ok = testcase.TestCase.EX_OK
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'os.path.exists', return_value=False), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'os.makedirs') as mock_os_makedirs, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.create_tempest_resources',
-                       return_value=ret_ok), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.configure_tempest',
-                       return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'generate_test_list',
-                              return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'apply_tempest_blacklist',
-                              return_value=ret):
-            self.assertEqual(self.tempestcommon.run(),
-                             ret)
-            self.assertTrue(mock_os_makedirs.called)
-
-    def test_run_missing_parse_verifier_result(self):
-        ret = testcase.TestCase.EX_RUN_ERROR
-        ret_ok = testcase.TestCase.EX_OK
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'os.path.exists', return_value=False), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'os.makedirs') as mock_os_makedirs, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.create_tempest_resources',
-                       return_value=ret_ok), \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'conf_utils.configure_tempest',
-                       return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'generate_test_list',
-                              return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'apply_tempest_blacklist',
-                              return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'run_verifier_tests',
-                              return_value=ret_ok), \
-            mock.patch.object(self.tempestcommon, 'parse_verifier_result',
-                              return_value=ret):
-            self.assertEqual(self.tempestcommon.run(),
-                             ret)
-            self.assertTrue(mock_os_makedirs.called)
+        with mock.patch.object(self.tempestcommon, 'generate_test_list',
+                               side_effect=Exception):
+            self._test_run(testcase.TestCase.EX_RUN_ERROR)
+
+    def test_run_apply_tempest_blacklist_ko(self):
+        with mock.patch.object(self.tempestcommon, 'generate_test_list'), \
+                    mock.patch.object(self.tempestcommon,
+                                      'apply_tempest_blacklist',
+                                      side_effect=Exception()):
+            self._test_run(testcase.TestCase.EX_RUN_ERROR)
+
+    def test_run_verifier_tests_ko(self, *args):
+        with mock.patch.object(self.tempestcommon, 'generate_test_list'), \
+                mock.patch.object(self.tempestcommon,
+                                  'apply_tempest_blacklist'), \
+                mock.patch.object(self.tempestcommon, 'run_verifier_tests',
+                                  side_effect=Exception()), \
+                mock.patch.object(self.tempestcommon, 'parse_verifier_result',
+                                  side_effect=Exception):
+            self._test_run(testcase.TestCase.EX_RUN_ERROR)
+
+    def test_run_parse_verifier_result_ko(self, *args):
+        with mock.patch.object(self.tempestcommon, 'generate_test_list'), \
+                mock.patch.object(self.tempestcommon,
+                                  'apply_tempest_blacklist'), \
+                mock.patch.object(self.tempestcommon, 'run_verifier_tests'), \
+                mock.patch.object(self.tempestcommon, 'parse_verifier_result',
+                                  side_effect=Exception):
+            self._test_run(testcase.TestCase.EX_RUN_ERROR)
+
+    def test_run(self, *args):
+        with mock.patch.object(self.tempestcommon, 'generate_test_list'), \
+                mock.patch.object(self.tempestcommon,
+                                  'apply_tempest_blacklist'), \
+                mock.patch.object(self.tempestcommon, 'run_verifier_tests'), \
+                mock.patch.object(self.tempestcommon, 'parse_verifier_result'):
+            self._test_run(testcase.TestCase.EX_OK)
 
 
if __name__ == "__main__":
    unittest.main(verbosity=2)
diff --git a/functest/tests/unit/opnfv_tests/vnf/__init__.py b/functest/tests/unit/opnfv_tests/vnf/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/functest/tests/unit/opnfv_tests/vnf/ims/__init__.py b/functest/tests/unit/opnfv_tests/vnf/ims/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
index 22cadf0..573fcb7 100644 (file)
@@ -41,8 +41,8 @@ class FunctestUtilsTesting(unittest.TestCase):
         self.status = 'test_status'
         self.details = 'test_details'
         self.db_url = 'test_db_url'
-        self.success_rate = 2.0
-        self.criteria = 'test_criteria==2.0'
+        self.criteria = 50
+        self.result = 75
         self.start_date = 1482624000
         self.stop_date = 1482624000
         self.start_time = time.time()
@@ -279,7 +279,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_logger_error:
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date, self.stop_date,
-                                              self.criteria, self.details)
+                                              self.result, self.details)
             mock_logger_error.assert_called_once_with("Please set env var: " +
                                                       str("\'" + env_var +
                                                           "\'"))
@@ -311,7 +311,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                              push_results_to_db(self.project, self.case_name,
                                                 self.start_date,
                                                 self.stop_date,
-                                                self.criteria, self.details))
+                                                self.result, self.details))
             mock_logger_error.assert_called_once_with(test_utils.
                                                       RegexMatch("Pushing "
                                                                  "Result to"
@@ -334,7 +334,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                              push_results_to_db(self.project, self.case_name,
                                                 self.start_date,
                                                 self.stop_date,
-                                                self.criteria, self.details))
+                                                self.result, self.details))
             self.assertTrue(mock_logger_error.called)
 
     def test_push_results_to_db_default(self):
@@ -349,7 +349,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                             push_results_to_db(self.project, self.case_name,
                                                self.start_date,
                                                self.stop_date,
-                                               self.criteria, self.details))
+                                               self.result, self.details))
     readline = 0
     test_ip = ['10.1.23.4', '10.1.14.15', '10.1.16.15']
 
@@ -566,7 +566,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_criteria:
             mock_criteria.return_value = self.criteria
             resp = functest_utils.check_success_rate(self.case_name,
-                                                     self.success_rate)
+                                                     self.result)
             self.assertEqual(resp, 'PASS')
 
     def test_check_success_rate_failed(self):
@@ -574,7 +574,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_criteria:
             mock_criteria.return_value = self.criteria
             resp = functest_utils.check_success_rate(self.case_name,
-                                                     3.0)
+                                                     0)
             self.assertEqual(resp, 'FAIL')
 
     # TODO: merge_dicts
index 555e9c2..ba52829 100644 (file)
@@ -41,6 +41,13 @@ ignore = ["paramiko",
 
 class Logger(object):
 
+    instance = None
+
+    def __new__(cls, logger_name):
+        if cls.instance is None:
+            cls.instance = object.__new__(cls)
+        return cls.instance
+
     def __init__(self, logger_name):
         self.setup_logging()
         self.logger = logging.getLogger(logger_name)
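
The added __new__ makes Logger a process-wide singleton: every call returns
the same cached instance, while __init__ still runs and rebinds the wrapped
logging.Logger to the latest name. A minimal sketch of the resulting
behaviour, assuming functest is importable:

    from functest.utils import functest_logger as ft_logger

    first = ft_logger.Logger("module_a")
    second = ft_logger.Logger("module_b")
    assert first is second
    # __init__ ran again on the second call, so the shared instance
    # now wraps logging.getLogger("module_b").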
index 0d61241..7d993cb 100644 (file)
@@ -192,7 +192,7 @@ def logger_test_results(project, case_name, status, details):
 
 @decorators.can_dump_request_to_file
 def push_results_to_db(project, case_name,
-                       start_date, stop_date, criteria, details):
+                       start_date, stop_date, result, details):
     """
     POST results to the Result target DB
     """
@@ -213,7 +213,7 @@ def push_results_to_db(project, case_name,
 
     params = {"project_name": project, "case_name": case_name,
               "pod_name": pod_name, "installer": installer,
-              "version": version, "scenario": scenario, "criteria": criteria,
+              "version": version, "scenario": scenario, "criteria": result,
               "build_tag": build_tag, "start_date": test_start,
               "stop_date": test_stop, "details": details}
 
@@ -248,7 +248,7 @@ def push_results_to_db(project, case_name,
                      'pod': pod_name,
                      'v': version,
                      's': scenario,
-                     'c': criteria,
+                     'c': result,
                      't': build_tag,
                      'd': details,
                      'error': e
@@ -379,23 +379,14 @@ def get_functest_config(parameter):
     return get_parameter_from_yaml(parameter, yaml_)
 
 
-def check_success_rate(case_name, success_rate):
-    success_rate = float(success_rate)
+def check_success_rate(case_name, result):
+    # It should be removed as TestCase now checks result
+    # against criteria itself.
+    logger.warning('check_success_rate will be removed soon')
     criteria = get_criteria_by_test(case_name)
-
-    def get_criteria_value(op):
-        return float(criteria.split(op)[1].rstrip('%'))
-
-    status = 'FAIL'
-    ops = ['==', '>=']
-    for op in ops:
-        if op in criteria:
-            c_value = get_criteria_value(op)
-            if eval("%s %s %s" % (success_rate, op, c_value)):
-                status = 'PASS'
-            break
-
-    return status
+    if type(criteria) == int and result >= criteria:
+        return 'PASS'
+    return 'FAIL'
 
 
 def merge_dicts(dict1, dict2):
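
The simplified helper now just compares the measured rate against the integer
criteria read from testcases.yaml. A usage sketch, assuming 'rally_full' is
declared with criteria: 90 as above:

    from functest.utils import functest_utils as ft_utils

    assert ft_utils.check_success_rate('rally_full', 95) == 'PASS'
    assert ft_utils.check_success_rate('rally_full', 80) == 'FAIL'

The helper itself is flagged for removal since TestCase.check_result() now
performs the same comparison.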