Convert SLA asserts to raises
author    Miikka Koistinen <miikka.koistinen@nokia.com>
          Fri, 8 Jun 2018 10:54:48 +0000 (13:54 +0300)
committer Miikka Koistinen <miikka.koistinen@nokia.com>
          Fri, 15 Jun 2018 11:32:37 +0000 (14:32 +0300)
This commit converts Python assert statements into a custom exception in
every place where SLA validation is performed.

This commit also fixes all pylint errors that emerged during the conversion.
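
As an illustration (the example is lifted from the pktgen scenario in this
change), an SLA check that used to read

    assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
        % (ppm, sla_max_ppm)

now goes through the new base.Scenario helper

    self.verify_SLA(ppm <= sla_max_ppm,
                    "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))

and the runners catch yardstick.common.exceptions.SLAValidationError
instead of AssertionError.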

JIRA: YARDSTICK-966

Change-Id: If771ed03b2cbc0a43a57fcfb9293f18740b3ff80
Signed-off-by: Miikka Koistinen <miikka.koistinen@nokia.com>
46 files changed:
yardstick/benchmark/runners/arithmetic.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/dynamictp.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/runners/search.py
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/base.py
yardstick/benchmark/scenarios/compute/cyclictest.py
yardstick/benchmark/scenarios/compute/lmbench.py
yardstick/benchmark/scenarios/compute/perf.py
yardstick/benchmark/scenarios/compute/qemu_migrate.py
yardstick/benchmark/scenarios/compute/ramspeed.py
yardstick/benchmark/scenarios/compute/unixbench.py
yardstick/benchmark/scenarios/networking/iperf3.py
yardstick/benchmark/scenarios/networking/moongen_testpmd.py
yardstick/benchmark/scenarios/networking/netperf.py
yardstick/benchmark/scenarios/networking/netperf_node.py
yardstick/benchmark/scenarios/networking/nstat.py
yardstick/benchmark/scenarios/networking/ping.py
yardstick/benchmark/scenarios/networking/ping6.py
yardstick/benchmark/scenarios/networking/pktgen.py
yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
yardstick/benchmark/scenarios/networking/vsperf.py
yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
yardstick/benchmark/scenarios/storage/fio.py
yardstick/common/exceptions.py
yardstick/tests/unit/benchmark/runner/test_search.py
yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py

index 6aaaed8..ecb59f9 100755 (executable)
@@ -37,6 +37,7 @@ import six
 from six.moves import range
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         loop_iter = six.moves.zip(*param_iters)
     else:
         LOG.warning("iter_type unrecognized: %s", iter_type)
-        raise TypeError("iter_type unrecognized: %s", iter_type)
+        raise TypeError("iter_type unrecognized: %s" % iter_type)
 
     # Populate options and run the requested method for each value combination
     for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
         else:
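
The sla_action consulted above is read from the test case's SLA configuration
earlier in _worker_process() (not shown in this hunk). A minimal sketch,
assuming the usual lookup with "assert" as the default action:

    sla_action = None
    if "sla" in scenario_cfg:
        # e.g. scenario_cfg["sla"] == {"max_ppm": 1000, "action": "monitor"}
        sla_action = scenario_cfg["sla"].get("action", "assert")

With "assert" the runner re-raises the SLAValidationError and the run is
aborted; with "monitor" the failure is only logged and error.args is recorded
in the iteration result.
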
index 60b0348..60f1fa5 100644 (file)
@@ -27,6 +27,7 @@ import traceback
 import time
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -70,13 +71,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems  https://bugs.python.org/issue9400
         except Exception:  # pylint: disable=broad-except
index 63bfc82..88d3c57 100755 (executable)
@@ -27,6 +27,7 @@ import traceback
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
                 try:
                     method(data)
-                except AssertionError as assertion:
-                    LOG.warning("SLA validation failed: %s" % assertion.args)
+                except y_exc.SLAValidationError as error:
+                    LOG.warning("SLA validation failed: %s", error.args)
                     too_high = True
-                except Exception as e:
+                except Exception as e:  # pylint: disable=broad-except
                     errors = traceback.format_exc()
                     LOG.exception(e)
 
index 20d6da0..4c88f36 100644 (file)
@@ -29,6 +29,7 @@ import traceback
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -75,13 +76,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
             try:
                 result = method(data)
-            except AssertionError as assertion:
+            except y_exc.SLAValidationError as error:
                 # SLA validation failed in scenario, determine what to do now
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s", assertion.args)
-                    errors = assertion.args
+                    LOG.warning("SLA validation failed: %s", error.args)
+                    errors = error.args
                 elif sla_action == "rate-control":
                     try:
                         scenario_cfg['options']['rate']
index 8037329..01a4292 100644 (file)
@@ -33,6 +33,7 @@ from collections import Mapping
 from six.moves import zip
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
 
         try:
             self.worker_helper(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if self.sla_action == "assert":
                 raise
             elif self.sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
 
index d6e3f71..0148a45 100644 (file)
@@ -30,6 +30,7 @@ import traceback
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
@@ -74,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
         else:
index 1fadd25..e2db03a 100644 (file)
@@ -58,16 +58,20 @@ class ScenarioGeneral(base.Scenario):
         self.director.stopMonitors()
 
         verify_result = self.director.verify()
+        service_not_found = False
         for k, v in self.director.data.items():
             if v == 0:
-                result['sla_pass'] = 0
                 verify_result = False
+                service_not_found = True
                 LOG.info("\033[92m The service process (%s) not found in the host environment", k)
 
         result['sla_pass'] = 1 if verify_result else 0
         self.director.store_result(result)
 
-        assert verify_result is True, "The HA test case NOT passed"
+        self.verify_SLA(
+            verify_result, ("a service process was not found in the host "
+                            "environment" if service_not_found
+                            else "Director.verify() failed"))
 
     def teardown(self):
         self.director.knockoff()
index 42941c6..76721e3 100755 (executable)
@@ -70,17 +70,20 @@ class ServiceHA(base.Scenario):
         LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
         sla_pass = self.monitorMgr.verify_SLA()
+        service_not_found = False
         for k, v in self.data.items():
             if v == 0:
                 sla_pass = False
+                service_not_found = True
                 LOG.info("The service process (%s) not found in the host envrioment", k)
 
         result['sla_pass'] = 1 if sla_pass else 0
         self.monitorMgr.store_result(result)
 
-        assert sla_pass is True, "The HA test case NOT pass the SLA"
-
-        return
+        self.verify_SLA(
+            sla_pass, ("a service process was not found in the host "
+                       "environment" if service_not_found
+                       else "MonitorMgr.verify_SLA() failed"))
 
     def teardown(self):
         """scenario teardown"""
index 58a0280..30ac1be 100644 (file)
@@ -20,6 +20,7 @@ import six
 from stevedore import extension
 
 import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
 
 
 def _iter_scenario_classes(scenario_type=None):
@@ -61,6 +62,11 @@ class Scenario(object):
         """Time waited after executing the run method"""
         time.sleep(time_seconds)
 
+    def verify_SLA(self, condition, error_msg):
+        if not condition:
+            raise y_exc.SLAValidationError(
+                case_name=self.__scenario_type__, error_msg=error_msg)
+
     @staticmethod
     def get_types():
         """return a list of known runner type (class) names"""
index 998463e..413709f 100644 (file)
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):
 
     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
                 if latency > sla_latency:
                     sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
                         (t, latency, t, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():    # pragma: no cover
index 801f7fa..2237e49 100644 (file)
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
             cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                   (repetition, warmup)
         else:
-            raise RuntimeError("No such test_type: %s for Lmbench scenario",
-                               test_type)
+            raise RuntimeError("No such test_type: %s for Lmbench scenario"
+                               % test_type)
 
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
                 if sla_latency < cache_latency:
                     sla_error += "latency %f > sla:max_latency(%f); " \
                         % (cache_latency, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():
index 0b8ed9b..b973211 100644 (file)
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
             % (load, duration, events_string)
 
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, stdout, _ = self.client.execute(cmd)
 
         if status:
             raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
             exp_val = self.scenario_cfg['sla']['expected_value']
             smaller_than_exp = 'smaller_than_expected' \
                                in self.scenario_cfg['sla']
-
-            if metric not in result:
-                assert False, "Metric (%s) not found." % metric
-            else:
-                if smaller_than_exp:
-                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
-                        % (metric, result[metric], exp_val)
-                else:
-                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
-                        % (metric, result[metric], exp_val)
+            self.verify_SLA(metric in result,
+                            "Metric (%s) not found." % metric)
+            self.verify_SLA(
+                not smaller_than_exp,
+                "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+            self.verify_SLA(
+                result[metric] >= exp_val,
+                "%s %d < %d (sla); " % (metric, result[metric], exp_val))
 
 
 def _test():
index 2de1270..975c90b 100644 (file)
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):
 
     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
                 if timevalue > sla_time:
                     sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
                         (t, timevalue, t, sla_time)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():    # pragma: no cover
index ca64935..4daf776 100644 (file)
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
                   (test_id, load, block_size)
         # only the test_id 1-6 will be used in this scenario
         else:
-            raise RuntimeError("No such type_id: %s for Ramspeed scenario",
-                               test_id)
+            raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+                               % test_id)
 
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
                 if bw < sla_min_bw:
                     sla_error += "Bandwidth %f < " \
                         "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
index cdb3457..3cea316 100644 (file)
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
                 if score < sla_score:
                     sla_error += "%s score %f < sla:%s_score(%f); " % \
                         (t, score, t, sla_score)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():  # pragma: no cover
index 98c4599..51e044e 100644 (file)
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
     def teardown(self):
         LOG.debug("teardown")
         self.host.close()
-        status, stdout, stderr = self.target.execute("pkill iperf3")
+        status, _, stderr = self.target.execute("pkill iperf3")
         if status:
             LOG.warning(stderr)
         self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
 
         LOG.debug("Executing command: %s", cmd)
 
-        status, stdout, stderr = self.host.execute(cmd)
+        status, stdout, _ = self.host.execute(cmd)
         if status:
             # error cause in json dict on stdout
             raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
                 bit_per_second = \
                     int(iperf_result["end"]["sum_received"]["bits_per_second"])
                 bytes_per_second = bit_per_second / 8
-                assert bytes_per_second >= sla_bytes_per_second, \
-                    "bytes_per_second %d < sla:bytes_per_second (%d); " % \
-                    (bytes_per_second, sla_bytes_per_second)
+                self.verify_SLA(
+                    bytes_per_second >= sla_bytes_per_second,
+                    "bytes_per_second %d < sla:bytes_per_second (%d); "
+                    % (bytes_per_second, sla_bytes_per_second))
             else:
                 sla_jitter = float(sla_iperf["jitter"])
 
                 jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
-                assert jitter_ms <= sla_jitter, \
-                    "jitter_ms  %f > sla:jitter %f; " % \
-                    (jitter_ms, sla_jitter)
+                self.verify_SLA(jitter_ms <= sla_jitter,
+                                "jitter_ms  %f > sla:jitter %f; "
+                                % (jitter_ms, sla_jitter))
 
 
 def _test():
index 86173c9..e3bd7af 100644 (file)
@@ -367,9 +367,10 @@ ports = {0,1},
             throughput_rx_mpps = int(
                 self.scenario_cfg["sla"]["throughput_rx_mpps"])
 
-            assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
-                "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
-                (throughput_rx_mpps, moongen_result["tx_mpps"])
+            self.verify_SLA(
+                throughput_rx_mpps <= moongen_result["tx_mpps"],
+                "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+                % (throughput_rx_mpps, moongen_result["tx_mpps"]))
 
     def teardown(self):
         """cleanup after the test execution"""
index 33c02d4..9f1a814 100755 (executable)
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
             sla_max_mean_latency = int(
                 self.scenario_cfg["sla"]["mean_latency"])
 
-            assert mean_latency <= sla_max_mean_latency, \
-                "mean_latency %f > sla_max_mean_latency(%f); " % \
-                (mean_latency, sla_max_mean_latency)
+            self.verify_SLA(mean_latency <= sla_max_mean_latency,
+                            "mean_latency %f > sla_max_mean_latency(%f); "
+                            % (mean_latency, sla_max_mean_latency))
 
 
 def _test():
index d52e6b9..0ad2ecf 100755 (executable)
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
             sla_max_mean_latency = int(
                 self.scenario_cfg["sla"]["mean_latency"])
 
-            assert mean_latency <= sla_max_mean_latency, \
-                "mean_latency %f > sla_max_mean_latency(%f); " % \
-                (mean_latency, sla_max_mean_latency)
+            self.verify_SLA(
+                mean_latency <= sla_max_mean_latency,
+                "mean_latency %f > sla_max_mean_latency(%f); "
+                % (mean_latency, sla_max_mean_latency))
 
     def teardown(self):
         """remove netperf from nodes after test"""
index 10c5607..ea067f8 100644 (file)
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
                 if rate > sla_rate:
                     sla_error += "%s rate %f > sla:%s_rate(%f); " % \
                         (i, rate, i, sla_rate)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
index e7d9bee..6caeab5 100644 (file)
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
                 result.update(utils.flatten_dict_key(ping_result))
                 if sla_max_rtt is not None:
                     sla_max_rtt = float(sla_max_rtt)
-                    assert rtt_result[target_vm_name] <= sla_max_rtt,\
-                        "rtt %f > sla: max_rtt(%f); " % \
-                        (rtt_result[target_vm_name], sla_max_rtt)
+                    self.verify_SLA(
+                        rtt_result[target_vm_name] <= sla_max_rtt,
+                        "rtt %f > sla: max_rtt(%f); "
+                        % (rtt_result[target_vm_name], sla_max_rtt))
             else:
                 LOG.error("ping '%s' '%s' timeout", options, target_vm)
                 # we need to specify a result to satisfy influxdb schema
@@ -102,13 +103,12 @@ class Ping(base.Scenario):
                 rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
                 # store result before potential AssertionError
                 result.update(utils.flatten_dict_key(ping_result))
-                if sla_max_rtt is not None:
-                    raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
-                        rtt_result[target_vm_name], sla_max_rtt))
-
-                else:
-                    raise AssertionError(
-                        "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+                self.verify_SLA(sla_max_rtt is None,
+                                "packet dropped rtt %f > sla: max_rtt(%f)"
+                                % (rtt_result[target_vm_name], sla_max_rtt))
+                self.verify_SLA(False,
+                                "packet dropped rtt %f"
+                                % (rtt_result[target_vm_name]))
 
 
 def _test():    # pragma: no cover
index 74855a1..3772780 100644 (file)
@@ -59,8 +59,7 @@ class Ping6(base.Scenario):  # pragma: no cover
             self._ssh_host(node_name)
             self.client._put_file_shell(
                 self.pre_setup_script, '~/pre_setup.sh')
-            status, stdout, stderr = self.client.execute(
-                "sudo bash pre_setup.sh")
+            self.client.execute("sudo bash pre_setup.sh")
 
     def _get_controller_node(self, host_list):
         for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario):  # pragma: no cover
         cmd = "sudo bash %s %s %s" % \
               (setup_bash_file, self.openrc, self.external_network)
         LOG.debug("Executing setup command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        self.client.execute(cmd)
 
         self.setup_done = True
 
@@ -171,8 +170,9 @@ class Ping6(base.Scenario):  # pragma: no cover
             result["rtt"] = float(stdout)
             if "sla" in self.scenario_cfg:
                 sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
-                assert result["rtt"] <= sla_max_rtt, \
-                    "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+                self.verify_SLA(result["rtt"] <= sla_max_rtt,
+                                "rtt %f > sla:max_rtt(%f); "
+                                % (result["rtt"], sla_max_rtt))
         else:
             LOG.error("ping6 timeout!!!")
         self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario):  # pragma: no cover
             self._ssh_host(node_name)
             self.client._put_file_shell(
                 self.post_teardown_script, '~/post_teardown.sh')
-            status, stdout, stderr = self.client.execute(
-                "sudo bash post_teardown.sh")
+            self.client.execute("sudo bash post_teardown.sh")
index b79b915..d1d500f 100644 (file)
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
         self.server.send_command(cmd)
         self.client.send_command(cmd)
 
-        """multiqueue setup"""
+        # multiqueue setup
         if not self._is_irqbalance_disabled():
             self._disable_irqbalance()
 
@@ -132,20 +132,20 @@ class Pktgen(base.Scenario):
     def _disable_irqbalance(self):
         cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
               "/etc/default/irqbalance"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
         cmd = "sudo service irqbalance stop"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
         cmd = "sudo service irqbalance disable"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
@@ -158,8 +158,8 @@ class Pktgen(base.Scenario):
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
@@ -171,8 +171,8 @@ class Pktgen(base.Scenario):
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
@@ -192,8 +192,8 @@ class Pktgen(base.Scenario):
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
@@ -206,8 +206,8 @@ class Pktgen(base.Scenario):
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
@@ -220,8 +220,8 @@ class Pktgen(base.Scenario):
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
@@ -240,8 +240,8 @@ class Pktgen(base.Scenario):
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
@@ -282,8 +282,8 @@ class Pktgen(base.Scenario):
             cmd = "sudo ethtool -L %s combined %s" % \
                 (self.vnic_name, available_queue_number)
             LOG.debug("Executing command: %s", cmd)
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
         return available_queue_number
@@ -374,8 +374,8 @@ class Pktgen(base.Scenario):
         if "sla" in self.scenario_cfg:
             LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
             sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
-            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
-                % (ppm, sla_max_ppm)
+            self.verify_SLA(ppm <= sla_max_ppm,
+                            "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
 
 
 def _test():  # pragma: no cover
index 9a7b975..1b018f5 100644 (file)
@@ -135,4 +135,4 @@ cat ~/result.log -vT \
             LOG.info("sla_max_latency: %d", sla_max_latency)
             debug_info = "avg_latency %d > sla_max_latency %d" \
                 % (avg_latency, sla_max_latency)
-            assert avg_latency <= sla_max_latency, debug_info
+            self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
index 497e59e..97b9cf7 100644 (file)
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
         cmd = "ip a | grep eth1 2>/dev/null"
         LOG.debug("Executing command: %s in %s", cmd, host)
         if "server" in host:
-            status, stdout, stderr = self.server.execute(cmd)
+            _, stdout, _ = self.server.execute(cmd)
             if stdout:
                 is_run = False
         else:
-            status, stdout, stderr = self.client.execute(cmd)
+            _, stdout, _ = self.client.execute(cmd)
             if stdout:
                 is_run = False
 
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
             ppm += (sent - received) % sent > 0
             LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
             sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
-            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
-                % (ppm, sla_max_ppm)
+            self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+                                                % (ppm, sla_max_ppm))
index 705544c..2b34740 100644 (file)
@@ -215,15 +215,15 @@ class Vsperf(base.Scenario):
         if 'sla' in self.scenario_cfg and \
            'metrics' in self.scenario_cfg['sla']:
             for metric in self.scenario_cfg['sla']['metrics'].split(','):
-                assert metric in result, \
-                    '%s is not collected by VSPERF' % (metric)
-                assert metric in self.scenario_cfg['sla'], \
-                    '%s is not defined in SLA' % (metric)
+                self.verify_SLA(metric in result,
+                                '%s was not collected by VSPERF' % metric)
+                self.verify_SLA(metric in self.scenario_cfg['sla'],
+                                '%s is not defined in SLA' % metric)
                 vs_res = float(result[metric])
                 sla_res = float(self.scenario_cfg['sla'][metric])
-                assert vs_res >= sla_res, \
-                    'VSPERF_%s(%f) < SLA_%s(%f)' % \
-                    (metric, vs_res, metric, sla_res)
+                self.verify_SLA(vs_res >= sla_res,
+                                'VSPERF_%s(%f) < SLA_%s(%f)'
+                                % (metric, vs_res, metric, sla_res))
 
     def teardown(self):
         """cleanup after the test execution"""
index 4545878..27bf40d 100644 (file)
@@ -231,7 +231,7 @@ class VsperfDPDK(base.Scenario):
         is_run = True
         cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        _, stdout, _ = self.client.execute(cmd)
         if stdout:
             is_run = False
         return is_run
@@ -325,15 +325,15 @@ class VsperfDPDK(base.Scenario):
         if 'sla' in self.scenario_cfg and \
            'metrics' in self.scenario_cfg['sla']:
             for metric in self.scenario_cfg['sla']['metrics'].split(','):
-                assert metric in result, \
-                    '%s is not collected by VSPERF' % (metric)
-                assert metric in self.scenario_cfg['sla'], \
-                    '%s is not defined in SLA' % (metric)
+                self.verify_SLA(metric in result,
+                                '%s was not collected by VSPERF' % metric)
+                self.verify_SLA(metric in self.scenario_cfg['sla'],
+                                '%s is not defined in SLA' % metric)
                 vs_res = float(result[metric])
                 sla_res = float(self.scenario_cfg['sla'][metric])
-                assert vs_res >= sla_res, \
-                    'VSPERF_%s(%f) < SLA_%s(%f)' % \
-                    (metric, vs_res, metric, sla_res)
+                self.verify_SLA(vs_res >= sla_res,
+                                'VSPERF_%s(%f) < SLA_%s(%f)'
+                                % (metric, vs_res, metric, sla_res))
 
     def teardown(self):
         """cleanup after the test execution"""
index d3ed840..c57c6ed 100644 (file)
@@ -223,7 +223,7 @@ class Fio(base.Scenario):
                         sla_error += "%s %d < " \
                             "sla:%s(%d); " % (k, v, k, min_v)
 
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():
index 18bb4aa..954d655 100644 (file)
@@ -309,3 +309,7 @@ class IxNetworkFlowNotPresent(YardstickException):
 
 class IxNetworkFieldNotPresentInStackItem(YardstickException):
     message = 'Field "%(field_name)s" not present in stack item %(stack_item)s'
+
+
+class SLAValidationError(YardstickException):
+    message = '%(case_name)s SLA validation failed. Error: %(error_msg)s'
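
Like the other exception classes in this module, SLAValidationError presumably
builds its final message by interpolating the keyword arguments it is raised
with into the %(...)s placeholders above, so a failure raised via verify_SLA()
would render roughly as:

    raise y_exc.SLAValidationError(case_name="Ping",
                                   error_msg="rtt 110.0 > sla: max_rtt(100.0); ")
    # str(exc) -> "Ping SLA validation failed. Error: rtt 110.0 > sla: max_rtt(100.0); "
    #             (assuming YardstickException substitutes the kwargs into `message`;
    #              the rtt figures are illustrative)
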
index 00a241c..10ea489 100644 (file)
@@ -19,6 +19,7 @@ import unittest
 
 from yardstick.benchmark.runners.search import SearchRunner
 from yardstick.benchmark.runners.search import SearchRunnerHelper
+from yardstick.common import exceptions as y_exc
 
 
 class TestSearchRunnerHelper(unittest.TestCase):
@@ -143,15 +144,15 @@ class TestSearchRunner(unittest.TestCase):
     def test__worker_run_once_assertion_error_assert(self):
         runner = SearchRunner({})
         runner.sla_action = 'assert'
-        runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+        runner.worker_helper = mock.MagicMock(side_effect=y_exc.SLAValidationError)
 
-        with self.assertRaises(AssertionError):
+        with self.assertRaises(y_exc.SLAValidationError):
             runner._worker_run_once('sequence 1')
 
     def test__worker_run_once_assertion_error_monitor(self):
         runner = SearchRunner({})
         runner.sla_action = 'monitor'
-        runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+        runner.worker_helper = mock.MagicMock(side_effect=y_exc.SLAValidationError)
 
         self.assertFalse(runner._worker_run_once('sequence 1'))
 
index d1172d5..cd065c9 100644 (file)
@@ -11,6 +11,7 @@ import mock
 import unittest
 
 from yardstick.benchmark.scenarios.availability import scenario_general
+from yardstick.common import exceptions as y_exc
 
 class ScenarioGeneralTestCase(unittest.TestCase):
 
@@ -59,6 +60,14 @@ class ScenarioGeneralTestCase(unittest.TestCase):
         self.instance.director.verify.return_value = False
         self.instance.director.data = {}
         ret = {}
-        self.assertRaises(AssertionError, self.instance.run, ret)
+        self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
+        self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 0)
+
+    def test_scenario_general_case_service_not_found_fail(self):
+        self.instance.director.verify.return_value = True
+        self.instance.director.data = {"general-attacker": 0}
+        ret = {}
+        self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
         self.instance.teardown()
         self.assertEqual(ret['sla_pass'], 0)
index dd656fb..cf1e76d 100644 (file)
@@ -11,6 +11,7 @@ import mock
 import unittest
 
 from yardstick.benchmark.scenarios.availability import serviceha
+from yardstick.common import exceptions as y_exc
 
 
 class ServicehaTestCase(unittest.TestCase):
@@ -71,5 +72,21 @@ class ServicehaTestCase(unittest.TestCase):
         mock_monitor.MonitorMgr().verify_SLA.return_value = False
 
         ret = {}
-        self.assertRaises(AssertionError, p.run, ret)
+        self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+        self.assertEqual(ret['sla_pass'], 0)
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
+                                                        *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+
+        p.setup()
+        self.assertTrue(p.setup_done)
+        p.data["kill-process"] = 0
+
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+
+        ret = {}
+        self.assertRaises(y_exc.SLAValidationError, p.run, ret)
         self.assertEqual(ret['sla_pass'], 0)
index f24ec24..4fadde4 100644 (file)
@@ -17,6 +17,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import cyclictest
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
@@ -122,7 +123,7 @@ class CyclictestTestCase(unittest.TestCase):
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
 
@@ -136,7 +137,7 @@ class CyclictestTestCase(unittest.TestCase):
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
 
@@ -150,7 +151,7 @@ class CyclictestTestCase(unittest.TestCase):
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
 
index 9640ce0..c4ac347 100644 (file)
@@ -17,6 +17,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import lmbench
+from yardstick.common import exceptions as y_exc
 
 
 # pylint: disable=unused-argument
@@ -144,7 +145,7 @@ class LmbenchTestCase(unittest.TestCase):
 
         sample_output = '[{"latency": 37.5, "size": 0.00049}]'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, l.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
     def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
 
@@ -162,7 +163,7 @@ class LmbenchTestCase(unittest.TestCase):
 
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, l.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
     def test_successful_latency_for_cache_run_sla(self, mock_ssh):
 
index 03003d0..02040ca 100644 (file)
@@ -17,6 +17,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import qemu_migrate
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
@@ -116,7 +117,7 @@ class QemuMigrateTestCase(unittest.TestCase):
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
 
@@ -129,7 +130,7 @@ class QemuMigrateTestCase(unittest.TestCase):
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
 
@@ -142,7 +143,7 @@ class QemuMigrateTestCase(unittest.TestCase):
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
 
index dcc0e81..9e055be 100644 (file)
@@ -18,6 +18,7 @@ from oslo_serialization import jsonutils
 
 from yardstick.common import utils
 from yardstick.benchmark.scenarios.compute import ramspeed
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
@@ -146,7 +147,7 @@ class RamspeedTestCase(unittest.TestCase):
  "Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
  "INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, r.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
 
     def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
         options = {
@@ -219,7 +220,7 @@ class RamspeedTestCase(unittest.TestCase):
  "Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
  "Bandwidth(MBps)": 2401.58}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, r.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
 
     def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
         options = {
index 6339a2d..e4a8d6e 100644 (file)
@@ -17,6 +17,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import unixbench
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
@@ -122,7 +123,7 @@ class UnixbenchTestCase(unittest.TestCase):
         sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, u.run, result)
+        self.assertRaises(y_exc.SLAValidationError, u.run, result)
 
     def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
 
@@ -137,7 +138,7 @@ class UnixbenchTestCase(unittest.TestCase):
         sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, u.run, result)
+        self.assertRaises(y_exc.SLAValidationError, u.run, result)
 
     def test_unixbench_unsuccessful_script_error(self, mock_ssh):
 
index 74144af..2190e93 100644 (file)
@@ -19,6 +19,7 @@ from oslo_serialization import jsonutils
 
 from yardstick.common import utils
 from yardstick.benchmark.scenarios.networking import iperf3
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh')
@@ -118,7 +119,7 @@ class IperfTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_iperf_successful_sla_jitter(self, mock_ssh):
         options = {"protocol": "udp", "bandwidth": "20m"}
@@ -152,7 +153,7 @@ class IperfTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output(self.output_name_udp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_iperf_successful_tcp_protocal(self, mock_ssh):
         options = {"protocol": "tcp", "nodelay": "yes"}
index 5907562..a7abcd9 100755 (executable)
@@ -18,6 +18,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import netperf
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
@@ -98,7 +99,7 @@ class NetperfTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_netperf_unsuccessful_script_error(self, mock_ssh):
 
index 956a9c0..a577dba 100755 (executable)
@@ -19,6 +19,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import netperf_node
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
@@ -98,7 +99,7 @@ class NetperfNodeTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
 
index 4adfab1..559e059 100644 (file)
@@ -14,6 +14,7 @@ import mock
 import unittest
 
 from yardstick.benchmark.scenarios.networking import ping
+from yardstick.common import exceptions as y_exc
 
 
 class PingTestCase(unittest.TestCase):
@@ -74,7 +75,7 @@ class PingTestCase(unittest.TestCase):
         p = ping.Ping(args, self.ctx)
 
         mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_unsuccessful_script_error(self, mock_ssh):
index 4662c85..ad5217a 100644 (file)
@@ -14,6 +14,7 @@ import mock
 import unittest
 
 from yardstick.benchmark.scenarios.networking import ping6
+from yardstick.common import exceptions as y_exc
 
 
 class PingTestCase(unittest.TestCase):
@@ -98,7 +99,7 @@ class PingTestCase(unittest.TestCase):
         p = ping6.Ping6(args, self.ctx)
         p.client = mock_ssh.SSH.from_node()
         mock_ssh.SSH.from_node().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
     def test_ping_unsuccessful_script_error(self, mock_ssh):
index 6aea03a..ea0deab 100644 (file)
@@ -13,6 +13,7 @@ import unittest
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import pktgen
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
@@ -176,7 +177,7 @@ class PktgenTestCase(unittest.TestCase):
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
             "packets_sent": 149776, "packetsize": 60, "flows": 110}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_unsuccessful_script_error(self, mock_ssh):
 
index 9760871..b141591 100644 (file)
@@ -12,6 +12,7 @@ import unittest
 
 import yardstick.common.utils as utils
 from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+from yardstick.common import exceptions as y_exc
 
 
 class PktgenDPDKLatencyTestCase(unittest.TestCase):
@@ -162,7 +163,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
 
         sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
         self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_dpdk_unsuccessful_script_error(self):
 
index e90fb07..39392e4 100644 (file)
@@ -16,6 +16,7 @@ from oslo_serialization import jsonutils
 import mock
 
 from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+from yardstick.common import exceptions as y_exc
 
 
 # pylint: disable=unused-argument
@@ -131,7 +132,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
             "packets_sent": 149776, "flows": 110}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_dpdk_throughput_unsuccessful_script_error(
             self, mock_ssh):
index f149cee..6e69ddc 100644 (file)
@@ -18,6 +18,7 @@ import mock
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.storage import fio
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.storage.fio.ssh')
@@ -203,7 +204,7 @@ class FioTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_fio_successful_bw_iops_sla(self, mock_ssh):
 
@@ -252,7 +253,7 @@ class FioTestCase(unittest.TestCase):
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_fio_unsuccessful_script_error(self, mock_ssh):