Add Benchmarking Unit to ApexLake 39/5139/5
author    Vincenzo Riccobene <vincenzox.m.riccobene@intel.com>
Wed, 23 Dec 2015 13:47:07 +0000 (13:47 +0000)
committer Jörgen Karlsson <jorgen.w.karlsson@ericsson.com>
Wed, 23 Dec 2015 15:04:09 +0000 (15:04 +0000)
Includes the benchmarking unit that runs benchmarks in ApexLake.
Also includes tests.

JIRA: YARDSTICK-35

Change-Id: I67c62efd6cbe0883963e8c4ddc86d003b1d355ac
Signed-off-by: Vincenzo Riccobene <vincenzox.m.riccobene@intel.com>
yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py [new file with mode: 0644]
yardstick/vTC/apexlake/tests/benchmarking_unit_test.py [new file with mode: 0644]

diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py b/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
new file mode 100644 (file)
index 0000000..a688f4a
--- /dev/null
@@ -0,0 +1,281 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+The Benchmarking Unit manages the benchmarking of VNFs, orchestrating their
+initialization, execution and finalization
+'''
+
+
+import json
+import time
+import inspect
+
+from experimental_framework.benchmarks import benchmark_base_class as base
+from experimental_framework import common
+from experimental_framework import data_manager as data
+from experimental_framework import heat_template_generation as heat
+from experimental_framework import deployment_unit as deploy
+
+
+class BenchmarkingUnit:
+    """
+    Management of the overall Benchmarking process
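+
+    A minimal usage sketch (illustrative only; the argument values are
+    placeholders, not real configuration)::
+
+        bu = BenchmarkingUnit('template_name', openstack_credentials,
+                              heat_template_parameters, iterations=1,
+                              benchmarks=[{'name': 'module.BenchmarkClass',
+                                           'params': dict()}])
+        bu.initialize()
+        bu.run_benchmarks()
+        bu.finalize()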
+    """
+
+    def __init__(self, heat_template_name, openstack_credentials,
+                 heat_template_parameters, iterations, benchmarks):
+        """
+        :param heat_template_name: (str) Name of the heat template.
+
+        :param openstack_credentials: (dict) Credentials for openstack.
+                        Required fields are: 'ip_controller', 'heat_url',
+                        'user', 'password', 'auth_uri', 'project'.
+
+        :param heat_template_parameters: (dict) parameters to be given as
+                        input to the heat template. Required keys depend on
+                        the specific heat template.
+
+        :param iterations: (int) number of cycles to be executed.
+
+        :param benchmarks: (list[dict]) Benchmarks/test cases to be executed
+                        in the cycle. Each entry provides the benchmark
+                        'name' and its 'params'.
+
+        :return: None
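+
+        Example of openstack_credentials (all values below are placeholders
+        for illustration only)::
+
+            {
+                'ip_controller': '10.0.0.1',
+                'heat_url': 'http://10.0.0.1:8004/v1',
+                'user': 'admin',
+                'password': 'secret',
+                'auth_uri': 'http://10.0.0.1:5000/v2.0',
+                'project': 'demo'
+            }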
+        """
+        # Loads vars from configuration file
+        self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
+        self.template_dir = common.get_template_dir()
+        self.results_directory = str(common.RESULT_DIR) + str(time.time())
+
+        # Initializes other internal variables from parameters
+        self.template_name = heat_template_name
+        self.iterations = iterations
+        self.required_benchmarks = benchmarks
+        self.template_files = []
+        self.benchmarks = list()
+        self.benchmark_names = list()
+        self.data_manager = data.DataManager(self.results_directory)
+        self.heat_template_parameters = heat_template_parameters
+        self.template_files = \
+            heat.get_all_heat_templates(self.template_dir,
+                                        self.template_file_extension)
+        common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)
+
+    def initialize(self):
+        """
+        Initializes the environment in order to run the benchmarks
+
+        :return: None
+        """
+        for benchmark in self.required_benchmarks:
+            benchmark_class = BenchmarkingUnit.get_benchmark_class(
+                benchmark['name'])
+            # Need to generate a unique name for the benchmark
+            # (since there is the possibility to have different
+            # instances of the same benchmark)
+            self.benchmarks.append(benchmark_class(
+                self.get_benchmark_name(benchmark['name']),
+                benchmark['params']))
+
+        for template_file_name in self.template_files:
+            experiment_name = BenchmarkingUnit.extract_experiment_name(
+                template_file_name)
+            self.data_manager.create_new_experiment(experiment_name)
+            for benchmark in self.benchmarks:
+                self.data_manager.add_benchmark(experiment_name,
+                                                benchmark.get_name())
+
+    def finalize(self):
+        """
+        Finalizes the Benchmarking Unit.
+        Destroys all the stacks deployed by the framework and saves the
+        results to a CSV file.
+
+        :return: None
+        """
+        self.data_manager.generate_result_csv_file()
+        common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()
+
+    def run_benchmarks(self):
+        """
+        Runs all the requested benchmarks and collects the results.
+
+        :return: None
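+
+        For every iteration, each available heat template is deployed once
+        per benchmark, following the sequence: benchmark.init() -> deploy
+        the template -> benchmark.run() -> destroy the deployment ->
+        benchmark.finalize().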
+        """
+        common.LOG.info('Run Benchmarking Unit')
+
+        experiment = dict()
+        result = dict()
+        for iteration in range(0, self.iterations):
+            common.LOG.info('Iteration ' + str(iteration))
+            for template_file_name in self.template_files:
+                experiment_name = BenchmarkingUnit.\
+                    extract_experiment_name(template_file_name)
+                experiment['experiment_name'] = experiment_name
+                configuration = self.\
+                    get_experiment_configuration(template_file_name)
+                # self.data_manager.add_configuration(experiment_name,
+                #                                     configuration)
+                for key in configuration.keys():
+                    experiment[key] = configuration[key]
+                # metadata = dict()
+                # metadata['experiment_name'] = experiment_name
+                # self.data_manager.add_metadata(experiment_name, metadata)
+
+                # For each benchmark in the cycle the workload is deployed
+                for benchmark in self.benchmarks:
+                    log_msg = 'Benchmark {} started on {}'.format(
+                        benchmark.get_name(), template_file_name
+                    )
+                    common.LOG.info(log_msg)
+
+                    # Initialization of Benchmark
+                    benchmark.init()
+                    log_msg = 'Template {} deployment START'.\
+                        format(experiment_name)
+                    common.LOG.info(log_msg)
+
+                    # Deployment of the workload
+                    deployment_success = \
+                        common.DEPLOYMENT_UNIT.deploy_heat_template(
+                            self.template_dir + template_file_name,
+                            experiment_name,
+                            self.heat_template_parameters)
+
+                    if deployment_success:
+                        log_msg = 'Template {} deployment COMPLETED'.format(
+                            experiment_name)
+                        common.LOG.info(log_msg)
+                    else:
+                        log_msg = 'Template {} deployment FAILED'.format(
+                            experiment_name)
+                        common.LOG.info(log_msg)
+                        continue
+
+                    # Running the Benchmark/test case
+                    result = benchmark.run()
+                    # self.data_manager.add_data_points(experiment_name,
+                    #                                   benchmark.get_name(),
+                    #                                   result)
+
+                    # Terminate the workload
+                    log_msg = 'Destroying deployment for experiment {}'.\
+                        format(experiment_name)
+                    common.LOG.info(log_msg)
+                    common.DEPLOYMENT_UNIT.destroy_heat_template(
+                        experiment_name)
+
+                    # Finalize the benchmark
+                    benchmark.finalize()
+                    log_msg = 'Benchmark {} terminated'.format(
+                        benchmark.__class__.__name__)
+                    common.LOG.info(log_msg)
+                    # self.data_manager.generate_result_csv_file()
+
+                    experiment['benchmark'] = benchmark.get_name()
+                    for key in benchmark.get_params():
+                        experiment[key] = benchmark.get_params()[key]
+                common.LOG.info('Benchmark Finished')
+                self.data_manager.generate_result_csv_file()
+        common.LOG.info('Benchmarking Unit: Experiments completed!')
+        return result
+
+    def get_experiment_configuration(self, template_file_name):
+        """
+        Reads and returns the configuration for the specific experiment
+        (heat template)
+
+        :param template_file_name: (str) Name of the heat template file for
+                        which the configuration is requested
+
+        :return: dict() Configuration parameters and values
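+
+        For example, a template file 'VTC_base_single_vm_wait_1.yaml' is
+        expected to have its configuration in the file
+        'VTC_base_single_vm_wait_1.yaml.json' in the template directory.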
+        """
+        file_name = "{}{}.json".format(self.template_dir, template_file_name)
+        with open(file_name) as json_file:
+            configuration = json.load(json_file)
+        return configuration
+
+    def get_benchmark_name(self, name, instance=0):
+        """
+        Returns the name to be used for the benchmark/test case (TC).
+        This is required since each benchmark/TC could be run more than once
+        within the same cycle, with different initialization parameters.
+        In order to distinguish between them, a unique name is generated.
+
+        :param name: (str) original name of the benchmark/TC
+
+        :param instance: (int) number of instances already in the queue for
+                        this type of benchmark/TC.
+
+        :return: (str) name to be assigned to the benchmark/TC
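+
+        Example (illustrative benchmark name; a second call with the same
+        name yields a new suffix)::
+
+            get_benchmark_name('mod.SomeBenchmark')  # 'mod.SomeBenchmark_0'
+            get_benchmark_name('mod.SomeBenchmark')  # 'mod.SomeBenchmark_1'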
+        """
+        if name + "_" + str(instance) in self.benchmark_names:
+            instance += 1
+            return self.get_benchmark_name(name, instance)
+        self.benchmark_names.append(name + "_" + str(instance))
+        return name + "_" + str(instance)
+
+    @staticmethod
+    def extract_experiment_name(template_file_name):
+        """
+        Generates a unique experiment name for a given template.
+
+        :param template_file_name: (str) File name of the template used
+                        during the experiment
+
+        :return: (str) Experiment Name
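+
+        Example::
+
+            'VTC_base_single_vm_wait_1.yaml' -> 'VTC_base_single_vm_wait_1'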
+        """
+        strings = template_file_name.split('.')
+        return ".".join(strings[:(len(strings)-1)])
+
+    @staticmethod
+    def get_benchmark_class(complete_module_name):
+        """
+        Returns the benchmark class contained in the given module.
+
+        :param complete_module_name: (str) Complete name of the module as
+                        returned by get_available_test_cases.
+
+        :return: Class related to the benchmark/TC present in the requested
+                        module.
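+
+        Example (module and class names as used in the tests below)::
+
+            get_benchmark_class(
+                'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark')
+            # returns the RFC2544ThroughputBenchmark class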
+        """
+        strings = complete_module_name.split('.')
+        class_name = 'experimental_framework.benchmarks.{}'.format(strings[0])
+        pkg = __import__(class_name, globals(), locals(), [], -1)
+        module = getattr(getattr(pkg, 'benchmarks'), strings[0])
+        members = inspect.getmembers(module)
+        for m in members:
+            if inspect.isclass(m[1]):
+                class_name = m[1]("", dict()).__class__.__name__
+                if isinstance(m[1]("", dict()), base.BenchmarkBaseClass) and \
+                        not class_name == 'BenchmarkBaseClass':
+                    return m[1]
+
+    @staticmethod
+    def get_required_benchmarks(required_benchmarks):
+        """
+        Returns instances of required test cases.
+
+        :param required_benchmarks: (list() of strings) Benchmarks to be
+                        executed by the experimental framework.
+
+        :return: list() of BenchmarkBaseClass
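+
+        Example (module and class name as used in the tests below)::
+
+            get_required_benchmarks(
+                ['rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark'])
+            # returns a list with one RFC2544ThroughputBenchmark instance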
+        """
+        benchmarks = list()
+        for b in required_benchmarks:
+            class_ = BenchmarkingUnit.get_benchmark_class(b)
+            instance = class_("", dict())
+            benchmarks.append(instance)
+        return benchmarks
diff --git a/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py b/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
new file mode 100644 (file)
index 0000000..b0f800a
--- /dev/null
@@ -0,0 +1,470 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'vmriccox'
+
+
+import unittest
+import mock
+from experimental_framework.benchmarking_unit import BenchmarkingUnit
+from experimental_framework.data_manager import DataManager
+from experimental_framework.deployment_unit import DeploymentUnit
+import experimental_framework.common as common
+from experimental_framework.benchmarks.rfc2544_throughput_benchmark import \
+    RFC2544ThroughputBenchmark
+
+
+class DummyDataManager(DataManager):
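+    """
+    Test double for DataManager: most overridden methods only count their
+    invocations and return the counter when called with get_counter set,
+    so tests can assert how many times each method was exercised.
+    """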
+
+    def __init__(self, experiment_directory):
+        self.experiment_directory = experiment_directory
+        self.experiments = dict()
+        self.new_exp_counter = 0
+        self.add_bench_counter = 0
+        self.close_experiment_1_counter = 0
+        self.close_experiment_2_counter = 0
+        self.generate_csv_counter = 0
+
+    def create_new_experiment(self, experiment_name, get_counter=None):
+        if not get_counter:
+            self.new_exp_counter += 1
+        else:
+            return self.new_exp_counter
+
+    def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
+        if not get_counter:
+            self.add_bench_counter += 1
+        else:
+            return self.add_bench_counter
+
+    def close_experiment(self, experiment, get_counter=None):
+        if get_counter:
+            return [self.close_experiment_1_counter,
+                    self.close_experiment_2_counter]
+        if experiment == 'VTC_base_single_vm_wait_1':
+            self.close_experiment_1_counter += 1
+        if experiment == 'VTC_base_single_vm_wait_2':
+            self.close_experiment_2_counter += 1
+
+    def generate_result_csv_file(self, get_counter=None):
+        if get_counter:
+            return self.generate_csv_counter
+        else:
+            self.generate_csv_counter += 1
+
+    def add_metadata(self, experiment_name, metadata):
+        pass
+
+    def add_configuration(self, experiment_name, configuration):
+        pass
+
+    def add_data_points(self, experiment_name, benchmark_name, result):
+        pass
+
+
+class Dummy_2544(RFC2544ThroughputBenchmark):
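+    """
+    Test double for RFC2544ThroughputBenchmark: init(), run() and finalize()
+    only count their invocations (readable back via get_counter) instead of
+    executing a real benchmark.
+    """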
+
+    def __init__(self, name, params):
+        self.name = name
+        self.init_counter = 0
+        self.finalize_counter = 0
+        self.run_counter = 0
+        self.params = params
+
+    def init(self, get_counter=None):
+        if get_counter:
+            return self.init_counter
+        else:
+            self.init_counter += 1
+
+    def finalize(self, get_counter=None):
+        if get_counter:
+            return self.finalize_counter
+        else:
+            self.finalize_counter += 1
+
+    def run(self, get_counter=None):
+        if get_counter:
+            return self.run_counter
+        else:
+            self.run_counter += 1
+        return {'throughput': 10}
+
+
+class DummyDeploymentUnit(DeploymentUnit):
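+    """
+    Test double for DeploymentUnit whose deploy_heat_template() always
+    reports a failed deployment.
+    """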
+
+    def __init__(self, openstack_credentials):
+        pass
+
+    def deploy_heat_template(self, template_file, stack_name, parameters,
+                             attempt=0):
+        return False
+
+
+class TestBenchmarkingUnit(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.heat.'
+                'get_all_heat_templates')
+    def test___init__(self, mock_heat, mock_dep_unit, mock_data_manager,
+                      mock_temp_dir, mock_time):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/results/'
+        common.TEMPLATE_FILE_EXTENSION = '.ext'
+        common.RESULT_DIR = 'tests/data/results/'
+        heat_template_name = 'name'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = ['bench_1', 'bench_2']
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        self.assertEqual(bu.required_benchmarks, benchmarks)
+        bu.heat_template_parameters = heat_template_parameters
+        mock_data_manager.assert_called_once_with('tests/data/results/12345')
+        mock_dep_unit.assert_called_once_with(openstack_credentials)
+        mock_heat.assert_called_once_with('tests/data/results/', '.ext')
+
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_initialize_for_success(self, mock_heat, mock_dep_unit,
+                                    mock_data_manager, mock_temp_dir,
+                                    mock_time, mock_rfc2544):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+
+        heat_template_name = 'VTC_base_single_vm_wait_'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        self.assertEqual(bu.required_benchmarks, benchmarks)
+        bu.heat_template_parameters = heat_template_parameters
+        bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+                             'VTC_base_single_vm_wait_2.yaml']
+        bu.initialize()
+        self.assertTrue(len(bu.benchmarks) == 1)
+        self.assertEqual(bu.benchmarks[0].__class__,
+                         Dummy_2544)
+        self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
+        self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
+
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_finalize_for_success(
+            self, mock_heat, mock_dep_unit, mock_data_manager, mock_temp_dir,
+            mock_time, mock_rfc2544):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+
+        heat_template_name = 'VTC_base_single_vm_wait_'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        bu.heat_template_parameters = heat_template_parameters
+        bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+                             'VTC_base_single_vm_wait_2.yaml']
+        bu.finalize()
+        # self.assertEqual(bu.data_manager.close_experiment('', True), [1, 1])
+        self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
+
+    @mock.patch('experimental_framework.common.push_data_influxdb')
+    @mock.patch('experimental_framework.common.LOG')
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_run_benchmarks_for_success(self, mock_heat, mock_common_dep_unit,
+                                        mock_dep_unit, mock_data_manager,
+                                        mock_temp_dir, mock_time,
+                                        mock_rfc2544, mock_log, mock_influx):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+        common.INFLUXDB_IP = 'InfluxIP'
+        common.INFLUXDB_PORT = '8086'
+        common.INFLUXDB_DB_NAME = 'test_db'
+
+        heat_template_name = 'VTC_base_single_vm_wait_'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        bu.data_manager = DummyDataManager('tests/data/results/12345')
+        bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+                             'VTC_base_single_vm_wait_2.yaml']
+        bu.benchmarks = [Dummy_2544('dummy', {'param1': 'val1'})]
+        bu.run_benchmarks()
+        self.assertEqual(bu.benchmarks[0].init(True), 2)
+        self.assertEqual(bu.benchmarks[0].finalize(True), 2)
+        self.assertEqual(bu.benchmarks[0].run(True), 2)
+        # expected_metric = \
+        #     'throughput,vnic_type=direct,ram=1024,benchmark=dummy,' \
+        #     'vcpus=2,experiment_name=VTC_base_single_vm_wait_2,' \
+        #     'param1=val1 value=10 12345000000000'
+        # mock_influx.assert_called_with(expected_metric)
+
+    @mock.patch('experimental_framework.common.LOG')
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_run_benchmarks_2_for_success(
+            self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_log):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+
+        heat_template_name = 'VTC_base_single_vm_wait_'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        bu.data_manager = DummyDataManager('tests/data/results/12345')
+        bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+                             'VTC_base_single_vm_wait_2.yaml']
+        bu.benchmarks = [Dummy_2544('dummy', dict())]
+        common.DEPLOYMENT_UNIT = DummyDeploymentUnit(dict())
+        bu.run_benchmarks()
+        self.assertEqual(bu.benchmarks[0].init(True), 2)
+        self.assertEqual(bu.benchmarks[0].finalize(True), 0)
+        self.assertEqual(bu.benchmarks[0].run(True), 0)
+
+    @mock.patch('experimental_framework.common.LOG')
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_get_benchmark_name_for_success(
+            self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_log):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+
+        heat_template_name = 'VTC_base_single_vm_wait_'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit(heat_template_name,
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+
+        expected = 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark_0'
+        output = bu.get_benchmark_name(
+            'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark')
+        self.assertEqual(expected, output)
+
+        expected = 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark_1'
+        output = bu.get_benchmark_name(
+            'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark')
+        self.assertEqual(expected, output)
+
+    @mock.patch('experimental_framework.common.LOG')
+    @mock.patch('experimental_framework.benchmarks.'
+                'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+    @mock.patch('time.time')
+    @mock.patch('experimental_framework.common.get_template_dir')
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+    @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+    @mock.patch('experimental_framework.benchmarking_unit.'
+                'heat.get_all_heat_templates')
+    def test_get_required_benchmarks_for_success(
+            self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_log):
+        mock_heat.return_value = list()
+        mock_time.return_value = '12345'
+        mock_temp_dir.return_value = 'tests/data/test_templates/'
+        common.TEMPLATE_FILE_EXTENSION = '.yaml'
+        common.RESULT_DIR = 'tests/data/results/'
+        openstack_credentials = {
+            'name': 'aaa',
+            'surname': 'bbb'
+        }
+        heat_template_parameters = {
+            'param_1': 'name_1',
+            'param_2': 'name_2'
+        }
+        iterations = 1
+        benchmarks = [
+            {
+                'name':
+                    'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+                'params': dict()
+            }
+        ]
+        bu = BenchmarkingUnit('',
+                              openstack_credentials,
+                              heat_template_parameters,
+                              iterations,
+                              benchmarks)
+        req_benchs = \
+            ['rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark']
+        output = bu.get_required_benchmarks(req_benchs)
+        self.assertEqual(len(req_benchs), 1)
+        self.assertEqual(output[0].__class__, Dummy_2544)