         data = {}
         errors = ""
 
+        benchmark.pre_run_wait_time(interval)
+
         try:
             result = method(data)
         except AssertionError as assertion:
             errors = assertion.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems  https://bugs.python.org/issue9400
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception("")
         else:
             # if we do timeout we don't care about dropping individual KPIs
             output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
 
         benchmark_output = {
             'timestamp': time.time(),
 
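# NOTE (illustrative sketch, not part of the patch): after this change the
# runner's per-iteration flow delegates both waits to the scenario instance
# instead of a hard-coded time.sleep(interval):
#
#     benchmark.pre_run_wait_time(interval)    # Scenario default: no wait
#     result = method(data)                    # the scenario's run()
#     benchmark.post_run_wait_time(interval)   # Scenario default: time.sleep
#
# so each Scenario subclass decides on which side of run() the wait happens.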
             data = {}
             errors = ""
 
+            benchmark.pre_run_wait_time(interval)
+
             try:
                 result = method(data)
             except AssertionError as assertion:
                 scenario_cfg['options']['rate'] -= delta
                 sequence = 1
                 continue
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 errors = traceback.format_exc()
                 LOG.exception("")
             else:
                 # if we do timeout we don't care about dropping individual KPIs
                 output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-            time.sleep(interval)
+            benchmark.post_run_wait_time(interval)
 
             benchmark_output = {
                 'timestamp': time.time(),
 
 #    under the License.
 
 import abc
+import time
 
 import six
 from stevedore import extension
         """Default teardown implementation for Scenario classes"""
         pass
 
+    def pre_run_wait_time(self, time_seconds):
+        """Time to wait before executing the run method (default: no wait)"""
+        pass
+
+    def post_run_wait_time(self, time_seconds):
+        """Time to wait after executing the run method (default: sleep)"""
+        time.sleep(time_seconds)
+
     @staticmethod
     def get_types():
         """return a list of known runner type (class) names"""
 
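# Example (hypothetical subclass, not part of the patch): a scenario that
# needs its wait before run() rather than after can override both hooks,
# mirroring the networking scenario changed below:
#
#     class SampleScenario(Scenario):
#         __scenario_type__ = "Sample"
#
#         def run(self):
#             pass
#
#         def pre_run_wait_time(self, time_seconds):
#             time.sleep(time_seconds)
#
#         def post_run_wait_time(self, time_seconds):
#             pass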
 
 import copy
 import logging
+import time
 
 import ipaddress
 from itertools import chain
             # https://bugs.python.org/issue9400
             LOG.exception("")
             raise RuntimeError("Error in teardown")
+
+    def pre_run_wait_time(self, time_seconds):
+        """Time to wait before executing the run method (sleeps here)"""
+        time.sleep(time_seconds)
+
+    def post_run_wait_time(self, time_seconds):
+        """Time to wait after executing the run method (no wait here)"""
+        pass
 
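# NOTE (illustrative, not part of the patch): with the overrides above this
# scenario's per-iteration timing becomes
#
#     time.sleep(interval)   # pre_run_wait_time()
#     run()
#     pass                   # post_run_wait_time() is a no-op
#
# inverting the Scenario base-class default, which sleeps only after run().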
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
+import mock
+
 from yardstick.benchmark.scenarios import base
 from yardstick.tests.unit import base as ut_base
 
 
+class _TestScenario(base.Scenario):
+    __scenario_type__ = 'Test Scenario'
+
+    def run(self):
+        pass
+
+
 class ScenarioTestCase(ut_base.BaseUnitTestCase):
 
     def test_get_scenario_type(self):
         with self.assertRaises(TypeError):
             base.Scenario()
 
+    @mock.patch.object(time, 'sleep')
+    def test_pre_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): no wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.pre_run_wait_time(mock.ANY)
+        mock_sleep.assert_not_called()
+
+    @mock.patch.object(time, 'sleep')
+    def test_post_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.post_run_wait_time(100)
+        mock_sleep.assert_called_once_with(100)
+
 
 class IterScenarioClassesTestCase(ut_base.BaseUnitTestCase):