Remove check_success_rate
author    jose.lausuch <jose.lausuch@ericsson.com>
Wed, 17 May 2017 12:20:43 +0000 (14:20 +0200)
committer jose.lausuch <jose.lausuch@ericsson.com>
Thu, 18 May 2017 11:26:37 +0000 (13:26 +0200)
After the introduction of the is_successful method in the
TestCase class, check_success_rate is no longer needed.

Change-Id: I70f6f9e10495601cd5443467347b97efe27b81a3
Signed-off-by: jose.lausuch <jose.lausuch@ericsson.com>
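
For reference, a minimal sketch of the is_successful() check this commit
relies on (a sketch under assumptions: the names, defaults and return
codes only approximate the functest TestCase base class):

    class TestCase(object):
        """Base class assumed to own the pass/fail decision."""

        EX_OK = 0
        EX_TESTCASE_FAILED = 1

        def __init__(self, case_name='', criteria=100):
            self.case_name = case_name
            self.criteria = criteria  # required success rate, in percent
            self.result = 0           # measured success rate, in percent

        def is_successful(self):
            # Pass when the measured rate meets or beats the criteria,
            # which is what check_success_rate used to compute externally.
            if isinstance(self.criteria, (int, float)) \
                    and self.result >= self.criteria:
                return TestCase.EX_OK
            return TestCase.EX_TESTCASE_FAILED
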
functest/opnfv_tests/openstack/rally/rally.py
functest/opnfv_tests/openstack/refstack_client/refstack_client.py
functest/opnfv_tests/openstack/tempest/tempest.py
functest/tests/unit/openstack/rally/test_rally.py
functest/tests/unit/openstack/tempest/test_tempest.py
functest/tests/unit/utils/test_functest_utils.py
functest/utils/functest_utils.py

index f762383..fbed0ce 100644 (file)
@@ -8,6 +8,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 
+from __future__ import division
+
 import json
 import logging
 import os
@@ -20,7 +22,6 @@ import yaml
 
 from functest.core import testcase
 from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
 import functest.utils.openstack_utils as os_utils
 
 logger = logging.getLogger(__name__)
@@ -480,11 +481,12 @@ class RallyBase(testcase.TestCase):
         total_duration_str2 = "{0:<10}".format(total_duration_str)
         total_nb_tests_str = "{0:<13}".format(total_nb_tests)
 
-        if len(self.summary):
-            success_rate = total_success / len(self.summary)
-        else:
-            success_rate = 100
-        success_rate = "{:0.2f}".format(success_rate)
+        try:
+            self.result = total_success / len(self.summary)
+        except ZeroDivisionError:
+            self.result = 100
+
+        success_rate = "{:0.2f}".format(self.result)
         success_rate_str = "{0:<10}".format(str(success_rate) + '%')
         report += ("+===================+============"
                    "+===============+===========+")
@@ -500,12 +502,10 @@ class RallyBase(testcase.TestCase):
                                     'nb tests': total_nb_tests,
                                     'nb success': success_rate}})
 
-        self.result = ft_utils.check_success_rate(
-            self.case_name, success_rate)
         self.details = payload
 
-        logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.result))
+        logger.info("Rally '%s' success_rate is %s%%"
+                    % (self.case_name, success_rate))
 
     def _clean_up(self):
         if self.volume_type:
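
Why the hunk above also adds `from __future__ import division`: on
Python 2, `/` between two ints truncates, so a partial success ratio
would silently round down to 0. A standalone sketch of the pattern the
patch uses (the helper name is illustrative only):

    from __future__ import division  # '/' yields a float on Python 2 too

    def success_rate(nb_success, nb_tests):
        # EAFP: let the empty case raise instead of pre-checking len().
        try:
            return nb_success / nb_tests
        except ZeroDivisionError:
            return 100
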
index ebae4b8..2a2718d 100755 (executable)
@@ -5,6 +5,10 @@
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
+
+from __future__ import division
+
+
 import argparse
 import logging
 import os
@@ -123,7 +127,11 @@ class RefstackClient(testcase.TestCase):
                 skipped_testcases += match + ", "
 
             num_executed = int(num_tests) - int(num_skipped)
-            success_rate = 100 * int(num_success) / int(num_executed)
+
+            try:
+                self.result = 100 * int(num_success) / int(num_executed)
+            except ZeroDivisionError:
+                logger.error("No test has been executed")
 
             self.details = {"tests": int(num_tests),
                             "failures": int(num_failures),
@@ -131,12 +139,10 @@ class RefstackClient(testcase.TestCase):
                             "errors": failed_testcases,
                             "skipped": skipped_testcases}
         except Exception:
-            success_rate = 0
+            self.result = 0
 
-        self.result = ft_utils.check_success_rate(
-            self.case_name, success_rate)
-        logger.info("Testcase %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.result))
+        logger.info("Testcase %s success_rate is %s%%"
+                    % (self.case_name, self.result))
 
     def run(self):
         '''used for functest command line,
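
One difference worth noting between the two handlers: in the refstack
hunk above, the ZeroDivisionError branch only logs, so self.result keeps
whatever value it held before. A more defensive variant (a sketch, not
what this patch does) would reset it explicitly, as the tempest hunk
below does:

    from __future__ import division

    import logging

    logger = logging.getLogger(__name__)

    class Handler(object):
        """Stand-in for RefstackClient, to keep the sketch runnable."""
        def compute_rate(self, num_success, num_executed):
            try:
                self.result = 100 * num_success / num_executed
            except ZeroDivisionError:
                logger.error("No test has been executed")
                self.result = 0  # the reset the refstack hunk omits
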
index 984e2a1..a41d07c 100644 (file)
@@ -8,6 +8,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 
+from __future__ import division
+
 import logging
 import os
 import re
@@ -181,7 +183,13 @@ class TempestCommon(testcase.TestCase):
 
         try:
             num_executed = int(num_tests) - int(num_skipped)
-            success_rate = 100 * int(num_success) / int(num_executed)
+            try:
+                self.result = 100 * int(num_success) / int(num_executed)
+            except ZeroDivisionError:
+                logger.error("No test has been executed")
+                self.result = 0
+                return
+
             with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
                                    "tempest.log"), 'r') as logfile:
                 output = logfile.read()
@@ -198,12 +206,10 @@ class TempestCommon(testcase.TestCase):
                             "errors": error_logs,
                             "skipped": skipped_testcase}
         except Exception:
-            success_rate = 0
+            self.result = 0
 
-        self.result = ft_utils.check_success_rate(
-            self.case_name, success_rate)
-        logger.info("Tempest %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.result))
+        logger.info("Tempest %s success_rate is %s%%"
+                    % (self.case_name, self.result))
 
     def run(self):
 
index fe25dfc..c782861 100644 (file)
@@ -343,19 +343,6 @@ class OSRallyTesting(unittest.TestCase):
             self.rally_base._run_tests()
             self.rally_base._run_task.assert_any_call('test1')
 
-    @mock.patch('functest.opnfv_tests.openstack.rally.rally.logger.info')
-    def test_generate_report(self, mock_logger_info):
-        summary = [{'test_name': 'test_name',
-                    'overall_duration': 5,
-                    'nb_tests': 3,
-                    'success': 5}]
-        self.rally_base.summary = summary
-        with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
-                        'ft_utils.check_success_rate',
-                        return_value='criteria'):
-            self.rally_base._generate_report()
-            self.assertTrue(mock_logger_info.called)
-
     def test_clean_up_default(self):
         self.rally_base.volume_type = mock.Mock()
         self.rally_base.cinder_client = mock.Mock()
index e05e5df..bb75c9e 100644 (file)
@@ -151,24 +151,6 @@ class OSTempestTesting(unittest.TestCase):
                 assert_any_call("Starting Tempest test suite: '%s'."
                                 % cmd_line)
 
-    @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.logger.info')
-    def test_parse_verifier_result_default(self, mock_logger_info):
-        self.tempestcommon.VERIFICATION_ID = 'test_uuid'
-        self.tempestcommon.case_name = 'test_case_name'
-        stdout = ['Testscount||2', 'Success||2', 'Skipped||0', 'Failures||0']
-        with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                        'subprocess.Popen') as mock_popen, \
-            mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
-                       'ft_utils.check_success_rate') as mock_method, \
-                mock.patch('__builtin__.open', mock.mock_open()):
-            mock_stdout = mock.Mock()
-            attrs = {'stdout': stdout}
-            mock_stdout.configure_mock(**attrs)
-            mock_popen.return_value = mock_stdout
-
-            self.tempestcommon.parse_verifier_result()
-            mock_method.assert_any_call('test_case_name', 100)
-
     @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
                 'os.path.exists', return_value=False)
     @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs',
index d48e06f..57e0c46 100644 (file)
@@ -563,22 +563,6 @@ class FunctestUtilsTesting(unittest.TestCase):
                 assert_called_once_with(self.parameter,
                                         self.config_yaml)
 
-    def test_check_success_rate_default(self):
-        with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
-                as mock_criteria:
-            mock_criteria.return_value = self.criteria
-            resp = functest_utils.check_success_rate(self.case_name,
-                                                     self.result)
-            self.assertEqual(resp, 100)
-
-    def test_check_success_rate_failed(self):
-        with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
-                as mock_criteria:
-            mock_criteria.return_value = self.criteria
-            resp = functest_utils.check_success_rate(self.case_name,
-                                                     0)
-            self.assertEqual(resp, 0)
-
     # TODO: merge_dicts
 
     def test_get_testcases_file_dir(self):
index 744258b..bf30f56 100644 (file)
@@ -379,16 +379,6 @@ def get_functest_config(parameter):
     return get_parameter_from_yaml(parameter, yaml_)
 
 
-def check_success_rate(case_name, result):
-    # It should be removed as TestCase tests criteria
-    # and result.
-    logger.warning('check_success_rate will be removed soon')
-    criteria = get_criteria_by_test(case_name)
-    if type(criteria) == int and result >= criteria:
-        return 100
-    return 0
-
-
 def merge_dicts(dict1, dict2):
     for k in set(dict1.keys()).union(dict2.keys()):
         if k in dict1 and k in dict2:
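
With check_success_rate deleted, callers are expected to read the
verdict from the test case itself. A usage sketch, assuming a configured
environment (the runner flow shown here is an assumption, not part of
this patch):

    from functest.core import testcase
    from functest.opnfv_tests.openstack.tempest import tempest

    test = tempest.TempestCommon()
    test.run()
    if test.is_successful() == testcase.TestCase.EX_OK:
        print("%s passed" % test.case_name)
    else:
        print("%s failed" % test.case_name)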