from six.moves import range
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
         loop_iter = six.moves.zip(*param_iters)
     else:
         LOG.warning("iter_type unrecognized: %s", iter_type)
-        raise TypeError("iter_type unrecognized: %s", iter_type)
+        raise TypeError("iter_type unrecognized: %s" % iter_type)
 
     # Populate options and run the requested method for each value combination
     for comb_values in loop_iter:
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
         else:
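
The runner hunks above (and the duration, search, iteration and sequence runner hunks below) all apply the same change: the scenario now raises y_exc.SLAValidationError instead of AssertionError, and the runner re-raises it when sla_action is "assert" or only logs it when sla_action is "monitor". A minimal, self-contained sketch of that dispatch, with an invented function name rather than the runners' real worker interface:

import logging
import traceback

from yardstick.common import exceptions as y_exc

LOG = logging.getLogger(__name__)


def run_one_iteration(method, data, sla_action=None):
    """Illustrative only: mirrors the SLA handling used by the runners."""
    errors = ""
    result = None
    try:
        result = method(data)
    except y_exc.SLAValidationError as error:
        if sla_action == "assert":
            raise                          # abort the whole test case
        elif sla_action == "monitor":
            LOG.warning("SLA validation failed: %s", error.args)
            errors = error.args            # record the failure, keep running
    except Exception:  # pylint: disable=broad-except
        errors = traceback.format_exc()
    return result, errors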
 
 import time
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems  https://bugs.python.org/issue9400
         except Exception:  # pylint: disable=broad-except
 
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
                 try:
                     method(data)
-                except AssertionError as assertion:
-                    LOG.warning("SLA validation failed: %s" % assertion.args)
+                except y_exc.SLAValidationError as error:
+                    LOG.warning("SLA validation failed: %s", error.args)
                     too_high = True
-                except Exception as e:
+                except Exception as e:  # pylint: disable=broad-except
                     errors = traceback.format_exc()
                     LOG.exception(e)
 
 
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
             try:
                 result = method(data)
-            except AssertionError as assertion:
+            except y_exc.SLAValidationError as error:
                 # SLA validation failed in scenario, determine what to do now
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s", assertion.args)
-                    errors = assertion.args
+                    LOG.warning("SLA validation failed: %s", error.args)
+                    errors = error.args
                 elif sla_action == "rate-control":
                     try:
                         scenario_cfg['options']['rate']
 
 from six.moves import zip
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
         try:
             self.worker_helper(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if self.sla_action == "assert":
                 raise
             elif self.sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
 
 
 import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
         else:
 
         self.director.stopMonitors()
 
         verify_result = self.director.verify()
+        service_not_found = False
         for k, v in self.director.data.items():
             if v == 0:
-                result['sla_pass'] = 0
                 verify_result = False
+                service_not_found = True
                 LOG.info("\033[92m The service process (%s) not found in the host environment", k)
 
         result['sla_pass'] = 1 if verify_result else 0
         self.director.store_result(result)
 
-        assert verify_result is True, "The HA test case NOT passed"
+        self.verify_SLA(
+            verify_result, ("a service process was not found in the host "
+                            "environment" if service_not_found
+                            else "Director.verify() failed"))
 
     def teardown(self):
         self.director.knockoff()
 
         LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
         sla_pass = self.monitorMgr.verify_SLA()
+        service_not_found = False
         for k, v in self.data.items():
             if v == 0:
                 sla_pass = False
+                service_not_found = True
                 LOG.info("The service process (%s) not found in the host envrioment", k)
 
         result['sla_pass'] = 1 if sla_pass else 0
         self.monitorMgr.store_result(result)
 
-        assert sla_pass is True, "The HA test case NOT pass the SLA"
-
-        return
+        self.verify_SLA(
+            sla_pass, ("a service process was not found in the host "
+                       "environment" if service_not_found
+                       else "MonitorMgr.verify_SLA() failed"))
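
Both HA scenarios now route the "service process not found" case through the same helper instead of writing result['sla_pass'] directly and asserting. A condensed, stand-alone illustration of the decision; the monitor data below is invented:

# hypothetical monitor data: process name -> number of running instances
data = {"nova-api": 1, "neutron-server": 0}

sla_pass = True                          # e.g. MonitorMgr.verify_SLA() result
service_not_found = any(count == 0 for count in data.values())
if service_not_found:
    sla_pass = False

result = {"sla_pass": 1 if sla_pass else 0}
print(result)   # {'sla_pass': 0}: neutron-server was not found on the host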
 
     def teardown(self):
         """scenario teardown"""
 
 from stevedore import extension
 
 import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
 
 
 def _iter_scenario_classes(scenario_type=None):
         """Time waited after executing the run method"""
         time.sleep(time_seconds)
 
+    def verify_SLA(self, condition, error_msg):
+        if not condition:
+            raise y_exc.SLAValidationError(
+                case_name=self.__scenario_type__, error_msg=error_msg)
+
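
A hedged sketch of how a scenario is expected to call the new helper in place of a bare assert; the scenario class, SLA key and measured value below are invented for illustration:

from yardstick.benchmark.scenarios import base


class DummyLatency(base.Scenario):
    """Illustrative scenario, not part of this change."""
    __scenario_type__ = "DummyLatency"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

    def run(self, result):
        measured_rtt = 15.0              # pretend measurement
        result["rtt"] = measured_rtt
        if "sla" in self.scenario_cfg:
            sla_max_rtt = float(self.scenario_cfg["sla"]["max_rtt"])
            # raises y_exc.SLAValidationError when the condition is False
            self.verify_SLA(measured_rtt <= sla_max_rtt,
                            "rtt %f > sla:max_rtt(%f); "
                            % (measured_rtt, sla_max_rtt))

With sla_action set to "monitor" the runner only logs the resulting exception; with "assert" it propagates and fails the test case.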
     @staticmethod
     def get_types():
         """return a list of known runner type (class) names"""
 
 
     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
                 if latency > sla_latency:
                     sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
                         (t, latency, t, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():    # pragma: no cover
 
             cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                   (repetition, warmup)
         else:
-            raise RuntimeError("No such test_type: %s for Lmbench scenario",
-                               test_type)
+            raise RuntimeError("No such test_type: %s for Lmbench scenario"
+                               % test_type)
 
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
                 if sla_latency < cache_latency:
                     sla_error += "latency %f > sla:max_latency(%f); " \
                         % (cache_latency, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():
 
             % (load, duration, events_string)
 
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, stdout, _ = self.client.execute(cmd)
 
         if status:
             raise RuntimeError(stdout)
             exp_val = self.scenario_cfg['sla']['expected_value']
             smaller_than_exp = 'smaller_than_expected' \
                                in self.scenario_cfg['sla']
-
-            if metric not in result:
-                assert False, "Metric (%s) not found." % metric
-            else:
-                if smaller_than_exp:
-                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
-                        % (metric, result[metric], exp_val)
-                else:
-                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
-                        % (metric, result[metric], exp_val)
+            self.verify_SLA(metric in result,
+                            "Metric (%s) not found." % metric)
+            if smaller_than_exp:
+                self.verify_SLA(
+                    result[metric] < exp_val,
+                    "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+            else:
+                self.verify_SLA(
+                    result[metric] >= exp_val,
+                    "%s %d < %d (sla); " % (metric, result[metric], exp_val))
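
To make the restored smaller_than_expected branch concrete, a small stand-alone illustration; the metric name and numbers are invented:

# hypothetical SLA section and measurement
sla = {"metric": "latency", "expected_value": 10,
       "smaller_than_expected": True}
result = {"latency": 8}

metric = sla["metric"]
exp_val = sla["expected_value"]

# with 'smaller_than_expected' present the SLA passes only when the measured
# value is strictly below the expected one; otherwise it must be >= exp_val
if "smaller_than_expected" in sla:
    sla_ok = result[metric] < exp_val
else:
    sla_ok = result[metric] >= exp_val

print(sla_ok)   # True for the values above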
 
 
 def _test():
 
 
     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
                 if timevalue > sla_time:
                     sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
                         (t, timevalue, t, sla_time)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():    # pragma: no cover
 
                   (test_id, load, block_size)
         # only the test_id 1-6 will be used in this scenario
         else:
-            raise RuntimeError("No such type_id: %s for Ramspeed scenario",
-                               test_id)
+            raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+                               % test_id)
 
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
                 if bw < sla_min_bw:
                     sla_error += "Bandwidth %f < " \
                         "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
                 if score < sla_score:
                     sla_error += "%s score %f < sla:%s_score(%f); " % \
                         (t, score, t, sla_score)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():  # pragma: no cover
 
     def teardown(self):
         LOG.debug("teardown")
         self.host.close()
-        status, stdout, stderr = self.target.execute("pkill iperf3")
+        status, _, stderr = self.target.execute("pkill iperf3")
         if status:
             LOG.warning(stderr)
         self.target.close()
 
         LOG.debug("Executing command: %s", cmd)
 
-        status, stdout, stderr = self.host.execute(cmd)
+        status, stdout, _ = self.host.execute(cmd)
         if status:
             # error cause in json dict on stdout
             raise RuntimeError(stdout)
                 bit_per_second = \
                     int(iperf_result["end"]["sum_received"]["bits_per_second"])
                 bytes_per_second = bit_per_second / 8
-                assert bytes_per_second >= sla_bytes_per_second, \
-                    "bytes_per_second %d < sla:bytes_per_second (%d); " % \
-                    (bytes_per_second, sla_bytes_per_second)
+                self.verify_SLA(
+                    bytes_per_second >= sla_bytes_per_second,
+                    "bytes_per_second %d < sla:bytes_per_second (%d); "
+                    % (bytes_per_second, sla_bytes_per_second))
             else:
                 sla_jitter = float(sla_iperf["jitter"])
 
                 jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
-                assert jitter_ms <= sla_jitter, \
-                    "jitter_ms  %f > sla:jitter %f; " % \
-                    (jitter_ms, sla_jitter)
+                self.verify_SLA(jitter_ms <= sla_jitter,
+                                "jitter_ms  %f > sla:jitter %f; "
+                                % (jitter_ms, sla_jitter))
 
 
 def _test():
 
             throughput_rx_mpps = int(
                 self.scenario_cfg["sla"]["throughput_rx_mpps"])
 
-            assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
-                "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
-                (throughput_rx_mpps, moongen_result["tx_mpps"])
+            self.verify_SLA(
+                throughput_rx_mpps <= moongen_result["tx_mpps"],
+                "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+                % (throughput_rx_mpps, moongen_result["tx_mpps"]))
 
     def teardown(self):
         """cleanup after the test execution"""
 
             sla_max_mean_latency = int(
                 self.scenario_cfg["sla"]["mean_latency"])
 
-            assert mean_latency <= sla_max_mean_latency, \
-                "mean_latency %f > sla_max_mean_latency(%f); " % \
-                (mean_latency, sla_max_mean_latency)
+            self.verify_SLA(mean_latency <= sla_max_mean_latency,
+                            "mean_latency %f > sla_max_mean_latency(%f); "
+                            % (mean_latency, sla_max_mean_latency))
 
 
 def _test():
 
             sla_max_mean_latency = int(
                 self.scenario_cfg["sla"]["mean_latency"])
 
-            assert mean_latency <= sla_max_mean_latency, \
-                "mean_latency %f > sla_max_mean_latency(%f); " % \
-                (mean_latency, sla_max_mean_latency)
+            self.verify_SLA(
+                mean_latency <= sla_max_mean_latency,
+                "mean_latency %f > sla_max_mean_latency(%f); "
+                % (mean_latency, sla_max_mean_latency))
 
     def teardown(self):
         """remove netperf from nodes after test"""
 
                 if rate > sla_rate:
                     sla_error += "%s rate %f > sla:%s_rate(%f); " % \
                         (i, rate, i, sla_rate)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
                 result.update(utils.flatten_dict_key(ping_result))
                 if sla_max_rtt is not None:
                     sla_max_rtt = float(sla_max_rtt)
-                    assert rtt_result[target_vm_name] <= sla_max_rtt,\
-                        "rtt %f > sla: max_rtt(%f); " % \
-                        (rtt_result[target_vm_name], sla_max_rtt)
+                    self.verify_SLA(
+                        rtt_result[target_vm_name] <= sla_max_rtt,
+                        "rtt %f > sla: max_rtt(%f); "
+                        % (rtt_result[target_vm_name], sla_max_rtt))
             else:
                 LOG.error("ping '%s' '%s' timeout", options, target_vm)
                 # we need to specify a result to satisfy influxdb schema
                 rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
                 # store result before potential AssertionError
                 result.update(utils.flatten_dict_key(ping_result))
-                if sla_max_rtt is not None:
-                    raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
-                        rtt_result[target_vm_name], sla_max_rtt))
-
-                else:
-                    raise AssertionError(
-                        "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+                if sla_max_rtt is not None:
+                    self.verify_SLA(
+                        False,
+                        "packet dropped rtt %f > sla: max_rtt(%f)"
+                        % (rtt_result[target_vm_name], sla_max_rtt))
+                self.verify_SLA(False,
+                                "packet dropped rtt %f"
+                                % rtt_result[target_vm_name])
 
 
 def _test():    # pragma: no cover
 
             self._ssh_host(node_name)
             self.client._put_file_shell(
                 self.pre_setup_script, '~/pre_setup.sh')
-            status, stdout, stderr = self.client.execute(
-                "sudo bash pre_setup.sh")
+            self.client.execute("sudo bash pre_setup.sh")
 
     def _get_controller_node(self, host_list):
         for host_name in host_list:
         cmd = "sudo bash %s %s %s" % \
               (setup_bash_file, self.openrc, self.external_network)
         LOG.debug("Executing setup command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        self.client.execute(cmd)
 
         self.setup_done = True
 
             result["rtt"] = float(stdout)
             if "sla" in self.scenario_cfg:
                 sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
-                assert result["rtt"] <= sla_max_rtt, \
-                    "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+                self.verify_SLA(result["rtt"] <= sla_max_rtt,
+                                "rtt %f > sla:max_rtt(%f); "
+                                % (result["rtt"], sla_max_rtt))
         else:
             LOG.error("ping6 timeout!!!")
         self.run_done = True
             self._ssh_host(node_name)
             self.client._put_file_shell(
                 self.post_teardown_script, '~/post_teardown.sh')
-            status, stdout, stderr = self.client.execute(
-                "sudo bash post_teardown.sh")
+            self.client.execute("sudo bash post_teardown.sh")
 
         self.server.send_command(cmd)
         self.client.send_command(cmd)
 
-        """multiqueue setup"""
+        # multiqueue setup
         if not self._is_irqbalance_disabled():
             self._disable_irqbalance()
 
     def _disable_irqbalance(self):
         cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
               "/etc/default/irqbalance"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
         cmd = "sudo service irqbalance stop"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
         cmd = "sudo service irqbalance disable"
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
             raise RuntimeError(stderr)
 
         cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
-        status, stdout, stderr = self.server.execute(cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, _, stderr = self.server.execute(cmd)
+        status, _, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError(stderr)
 
 
             cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
                 % (smp_affinity_mask, int(stdout))
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
 
             cmd = "sudo ethtool -L %s combined %s" % \
                 (self.vnic_name, available_queue_number)
             LOG.debug("Executing command: %s", cmd)
-            status, stdout, stderr = self.server.execute(cmd)
-            status, stdout, stderr = self.client.execute(cmd)
+            status, _, stderr = self.server.execute(cmd)
+            status, _, stderr = self.client.execute(cmd)
             if status:
                 raise RuntimeError(stderr)
         return available_queue_number
         if "sla" in self.scenario_cfg:
             LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
             sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
-            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
-                % (ppm, sla_max_ppm)
+            self.verify_SLA(ppm <= sla_max_ppm,
+                            "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
 
 
 def _test():  # pragma: no cover
 
             LOG.info("sla_max_latency: %d", sla_max_latency)
             debug_info = "avg_latency %d > sla_max_latency %d" \
                 % (avg_latency, sla_max_latency)
-            assert avg_latency <= sla_max_latency, debug_info
+            self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
 
         cmd = "ip a | grep eth1 2>/dev/null"
         LOG.debug("Executing command: %s in %s", cmd, host)
         if "server" in host:
-            status, stdout, stderr = self.server.execute(cmd)
+            _, stdout, _ = self.server.execute(cmd)
             if stdout:
                 is_run = False
         else:
-            status, stdout, stderr = self.client.execute(cmd)
+            _, stdout, _ = self.client.execute(cmd)
             if stdout:
                 is_run = False
 
             ppm += (sent - received) % sent > 0
             LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
             sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
-            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
-                % (ppm, sla_max_ppm)
+            self.verify_SLA(ppm <= sla_max_ppm,
+                            "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
 
         if 'sla' in self.scenario_cfg and \
            'metrics' in self.scenario_cfg['sla']:
             for metric in self.scenario_cfg['sla']['metrics'].split(','):
-                assert metric in result, \
-                    '%s is not collected by VSPERF' % (metric)
-                assert metric in self.scenario_cfg['sla'], \
-                    '%s is not defined in SLA' % (metric)
+                self.verify_SLA(metric in result,
+                                '%s was not collected by VSPERF' % metric)
+                self.verify_SLA(metric in self.scenario_cfg['sla'],
+                                '%s is not defined in SLA' % metric)
                 vs_res = float(result[metric])
                 sla_res = float(self.scenario_cfg['sla'][metric])
-                assert vs_res >= sla_res, \
-                    'VSPERF_%s(%f) < SLA_%s(%f)' % \
-                    (metric, vs_res, metric, sla_res)
+                self.verify_SLA(vs_res >= sla_res,
+                                'VSPERF_%s(%f) < SLA_%s(%f)'
+                                % (metric, vs_res, metric, sla_res))
 
     def teardown(self):
         """cleanup after the test execution"""
 
         is_run = True
         cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        _, stdout, _ = self.client.execute(cmd)
         if stdout:
             is_run = False
         return is_run
         if 'sla' in self.scenario_cfg and \
            'metrics' in self.scenario_cfg['sla']:
             for metric in self.scenario_cfg['sla']['metrics'].split(','):
-                assert metric in result, \
-                    '%s is not collected by VSPERF' % (metric)
-                assert metric in self.scenario_cfg['sla'], \
-                    '%s is not defined in SLA' % (metric)
+                self.verify_SLA(metric in result,
+                                '%s was not collected by VSPERF' % metric)
+                self.verify_SLA(metric in self.scenario_cfg['sla'],
+                                '%s is not defined in SLA' % metric)
                 vs_res = float(result[metric])
                 sla_res = float(self.scenario_cfg['sla'][metric])
-                assert vs_res >= sla_res, \
-                    'VSPERF_%s(%f) < SLA_%s(%f)' % \
-                    (metric, vs_res, metric, sla_res)
+                self.verify_SLA(vs_res >= sla_res,
+                                'VSPERF_%s(%f) < SLA_%s(%f)'
+                                % (metric, vs_res, metric, sla_res))
 
     def teardown(self):
         """cleanup after the test execution"""
 
                         sla_error += "%s %d < " \
                             "sla:%s(%d); " % (k, v, k, min_v)
 
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():
 
 
 class IxNetworkFieldNotPresentInStackItem(YardstickException):
     message = 'Field "%(field_name)s" not present in stack item %(stack_item)s'
+
+
+class SLAValidationError(YardstickException):
+    message = '%(case_name)s SLA validation failed. Error: %(error_msg)s'
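
Assuming YardstickException follows the oslo-style convention already used by the other exceptions in this module (the class-level message template is %-formatted with the keyword arguments passed to the constructor), the new exception would be raised and rendered roughly as follows; the case name and error text are invented:

from yardstick.common import exceptions as y_exc

try:
    raise y_exc.SLAValidationError(
        case_name="Ping", error_msg="rtt 15.0 > sla:max_rtt(10.0); ")
except y_exc.SLAValidationError as error:
    # expected to print something close to:
    #   Ping SLA validation failed. Error: rtt 15.0 > sla:max_rtt(10.0);
    print(error)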
 
 
 from yardstick.benchmark.runners.search import SearchRunner
 from yardstick.benchmark.runners.search import SearchRunnerHelper
+from yardstick.common import exceptions as y_exc
 
 
 class TestSearchRunnerHelper(unittest.TestCase):
     def test__worker_run_once_assertion_error_assert(self):
         runner = SearchRunner({})
         runner.sla_action = 'assert'
-        runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+        runner.worker_helper = mock.MagicMock(
+            side_effect=y_exc.SLAValidationError)
 
-        with self.assertRaises(AssertionError):
+        with self.assertRaises(y_exc.SLAValidationError):
             runner._worker_run_once('sequence 1')
 
     def test__worker_run_once_assertion_error_monitor(self):
         runner = SearchRunner({})
         runner.sla_action = 'monitor'
-        runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+        runner.worker_helper = mock.MagicMock(
+            side_effect=y_exc.SLAValidationError)
 
         self.assertFalse(runner._worker_run_once('sequence 1'))
 
 
 import unittest
 
 from yardstick.benchmark.scenarios.availability import scenario_general
+from yardstick.common import exceptions as y_exc
 
 class ScenarioGeneralTestCase(unittest.TestCase):
 
         self.instance.director.verify.return_value = False
         self.instance.director.data = {}
         ret = {}
-        self.assertRaises(AssertionError, self.instance.run, ret)
+        self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
+        self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 0)
+
+    def test_scenario_general_case_service_not_found_fail(self):
+        self.instance.director.verify.return_value = True
+        self.instance.director.data = {"general-attacker": 0}
+        ret = {}
+        self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
         self.instance.teardown()
         self.assertEqual(ret['sla_pass'], 0)
 
 import unittest
 
 from yardstick.benchmark.scenarios.availability import serviceha
+from yardstick.common import exceptions as y_exc
 
 
 class ServicehaTestCase(unittest.TestCase):
         mock_monitor.MonitorMgr().verify_SLA.return_value = False
 
         ret = {}
-        self.assertRaises(AssertionError, p.run, ret)
+        self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+        self.assertEqual(ret['sla_pass'], 0)
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
+                                                        *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+
+        p.setup()
+        self.assertTrue(p.setup_done)
+        p.data["kill-process"] = 0
+
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+
+        ret = {}
+        self.assertRaises(y_exc.SLAValidationError, p.run, ret)
         self.assertEqual(ret['sla_pass'], 0)
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import cyclictest
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
 
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
 
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, result)
+        self.assertRaises(y_exc.SLAValidationError, c.run, result)
 
     def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
 
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import lmbench
+from yardstick.common import exceptions as y_exc
 
 
 # pylint: disable=unused-argument
 
         sample_output = '[{"latency": 37.5, "size": 0.00049}]'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, l.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
     def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
 
 
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, l.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
     def test_successful_latency_for_cache_run_sla(self, mock_ssh):
 
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import qemu_migrate
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
 
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
 
         sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, q.run, result)
+        self.assertRaises(y_exc.SLAValidationError, q.run, result)
 
     def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
 
 
 
 from yardstick.common import utils
 from yardstick.benchmark.scenarios.compute import ramspeed
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
  "Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
  "INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, r.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
 
     def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
         options = {
  "Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
  "Bandwidth(MBps)": 2401.58}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, r.run, self.result)
+        self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
 
     def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
         options = {
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import unixbench
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
         sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, u.run, result)
+        self.assertRaises(y_exc.SLAValidationError, u.run, result)
 
     def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
 
         sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
 
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, u.run, result)
+        self.assertRaises(y_exc.SLAValidationError, u.run, result)
 
     def test_unixbench_unsuccessful_script_error(self, mock_ssh):
 
 
 
 from yardstick.common import utils
 from yardstick.benchmark.scenarios.networking import iperf3
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh')
 
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_iperf_successful_sla_jitter(self, mock_ssh):
         options = {"protocol": "udp", "bandwidth": "20m"}
 
         sample_output = self._read_sample_output(self.output_name_udp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_iperf_successful_tcp_protocal(self, mock_ssh):
         options = {"protocol": "tcp", "nodelay": "yes"}
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import netperf
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_netperf_unsuccessful_script_error(self, mock_ssh):
 
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import netperf_node
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
 
 
 import unittest
 
 from yardstick.benchmark.scenarios.networking import ping
+from yardstick.common import exceptions as y_exc
 
 
 class PingTestCase(unittest.TestCase):
         p = ping.Ping(args, self.ctx)
 
         mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_unsuccessful_script_error(self, mock_ssh):
 
 import unittest
 
 from yardstick.benchmark.scenarios.networking import ping6
+from yardstick.common import exceptions as y_exc
 
 
 class PingTestCase(unittest.TestCase):
         p = ping6.Ping6(args, self.ctx)
         p.client = mock_ssh.SSH.from_node()
         mock_ssh.SSH.from_node().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
     def test_ping_unsuccessful_script_error(self, mock_ssh):
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.networking import pktgen
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
             "packets_sent": 149776, "packetsize": 60, "flows": 110}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_unsuccessful_script_error(self, mock_ssh):
 
 
 
 import yardstick.common.utils as utils
 from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+from yardstick.common import exceptions as y_exc
 
 
 class PktgenDPDKLatencyTestCase(unittest.TestCase):
 
         sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
         self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_dpdk_unsuccessful_script_error(self):
 
 
 import mock
 
 from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+from yardstick.common import exceptions as y_exc
 
 
 # pylint: disable=unused-argument
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
             "packets_sent": 149776, "flows": 110}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_pktgen_dpdk_throughput_unsuccessful_script_error(
             self, mock_ssh):
 
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.storage import fio
+from yardstick.common import exceptions as y_exc
 
 
 @mock.patch('yardstick.benchmark.scenarios.storage.fio.ssh')
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_fio_successful_bw_iops_sla(self, mock_ssh):
 
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, result)
+        self.assertRaises(y_exc.SLAValidationError, p.run, result)
 
     def test_fio_unsuccessful_script_error(self, mock_ssh):