Fix some small things in ApexLake tests 89/5589/4
authorVincenzo Riccobene <vincenzox.m.riccobene@intel.com>
Tue, 5 Jan 2016 15:28:05 +0000 (15:28 +0000)
committerJörgen Karlsson <jorgen.w.karlsson@ericsson.com>
Wed, 6 Jan 2016 22:21:41 +0000 (22:21 +0000)
JIRA: YARDSTICK-35

Change-Id: I280365dd64d2f6c00befb984acd0d4c9ca2645bd
Signed-off-by: Vincenzo Riccobene <vincenzo.m.riccobene@intel.com>
Signed-off-by: Vincenzo Riccobene <vincenzox.m.riccobene@intel.com>
yardstick/vTC/apexlake/experimental_framework/api.py
yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
yardstick/vTC/apexlake/tests/api_test.py
yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
yardstick/vTC/apexlake/tests/experiment_test.py [deleted file]
yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py

index 635dcd2..1851f1b 100644 (file)
@@ -27,18 +27,18 @@ class FrameworkApi(object):
         """
         common.init(api=True)
 
-    @staticmethod
-    def get_available_test_cases():
-        """
-        Returns a list of available test cases.
-        This list include eventual modules developed by the user, if any.
-        Each test case is returned as a string that represents the full name
-        of the test case and that can be used to get more information
-        calling get_test_case_features(test_case_name)
-
-        :return: list of strings
-        """
-        return b_unit.BenchmarkingUnit.get_available_test_cases()
+    @staticmethod
+    def get_available_test_cases():
+        """
+        Returns a list of available test cases.
+        This list include eventual modules developed by the user, if any.
+        Each test case is returned as a string that represents the full name
+        of the test case and that can be used to get more information
+        calling get_test_case_features(test_case_name)
+
+        :return: list of strings
+        """
+        return b_unit.BenchmarkingUnit.get_available_test_cases()
 
     @staticmethod
     def get_test_case_features(test_case):
index 1a19826..1963696 100644 (file)
@@ -188,7 +188,7 @@ class BenchmarkingUnit:
                     for key in benchmark.get_params():
                         experiment[key] = benchmark.get_params()[key]
                 common.LOG.info('Benchmark Finished')
-                self.data_manager.generate_result_csv_file()
+                self.data_manager.generate_result_csv_file()
         common.LOG.info('Benchmarking Unit: Experiments completed!')
         return result
 
index e3d5a8b..4b70b9b 100644 (file)
@@ -80,35 +80,34 @@ class TestGeneratesTemplate(unittest.TestCase):
     #     output = FrameworkApi.get_available_test_cases()
     #     self.assertEqual(expected, output)
 
-    # @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
-    #             'get_required_benchmarks',
-    #             side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
-    # def test_get_test_case_features_for_success(self, mock_get_req_bench):
-    #
-    #     expected = dict()
-    #     expected['description'] = 'Instantiation Validation Benchmark'
-    #     expected['parameters'] = [
-    #         iv.THROUGHPUT,
-    #         iv.VLAN_SENDER,
-    #         iv.VLAN_RECEIVER]
-    #     expected['allowed_values'] = dict()
-    #     expected['allowed_values'][iv.THROUGHPUT] = \
-    #         map(str, range(0, 100))
-    #     expected['allowed_values'][iv.VLAN_SENDER] = \
-    #         map(str, range(-1, 4096))
-    #     expected['allowed_values'][iv.VLAN_RECEIVER] = \
-    #         map(str, range(-1, 4096))
-    #     expected['default_values'] = dict()
-    #     expected['default_values'][iv.THROUGHPUT] = '1'
-    #     expected['default_values'][iv.VLAN_SENDER] = '-1'
-    #     expected['default_values'][iv.VLAN_RECEIVER] = '-1'
-    #
-    #     test_case = 'instantiation_validation_benchmark.' \
-    #                 'InstantiationValidationBenchmark'
-    #     output = FrameworkApi.get_test_case_features(test_case)
-    #     self.assertEqual(expected, output)
-
-    def test____for_failure(self):
+    @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
+                'get_required_benchmarks',
+                side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
+    def test_get_test_case_features_for_success(self, mock_get_req_bench):
+        expected = dict()
+        expected['description'] = 'Instantiation Validation Benchmark'
+        expected['parameters'] = [
+            iv.THROUGHPUT,
+            iv.VLAN_SENDER,
+            iv.VLAN_RECEIVER]
+        expected['allowed_values'] = dict()
+        expected['allowed_values'][iv.THROUGHPUT] = \
+            map(str, range(0, 100))
+        expected['allowed_values'][iv.VLAN_SENDER] = \
+            map(str, range(-1, 4096))
+        expected['allowed_values'][iv.VLAN_RECEIVER] = \
+            map(str, range(-1, 4096))
+        expected['default_values'] = dict()
+        expected['default_values'][iv.THROUGHPUT] = '1'
+        expected['default_values'][iv.VLAN_SENDER] = '-1'
+        expected['default_values'][iv.VLAN_RECEIVER] = '-1'
+
+        test_case = 'instantiation_validation_benchmark.' \
+                    'InstantiationValidationBenchmark'
+        output = FrameworkApi.get_test_case_features(test_case)
+        self.assertEqual(expected, output)
+
+    def test__get_test_case_features__for_failure(self):
         self.assertRaises(
             ValueError, FrameworkApi.get_test_case_features, 111)
 
index b0f800a..ccf6406 100644 (file)
@@ -18,59 +18,59 @@ __author__ = 'vmriccox'
 import unittest
 import mock
 from experimental_framework.benchmarking_unit import BenchmarkingUnit
-from experimental_framework.data_manager import DataManager
+from experimental_framework.data_manager import DataManager
 from experimental_framework.deployment_unit import DeploymentUnit
 import experimental_framework.common as common
 from experimental_framework.benchmarks.rfc2544_throughput_benchmark import \
     RFC2544ThroughputBenchmark
 
 
-class DummyDataManager(DataManager):
-
-    def __init__(self, experiment_directory):
-        self.experiment_directory = experiment_directory
-        self.experiments = dict()
-        self.new_exp_counter = 0
-        self.add_bench_counter = 0
-        self.close_experiment_1_counter = 0
-        self.close_experiment_2_counter = 0
-        self.generate_csv_counter = 0
-
-    def create_new_experiment(self, experiment_name, get_counter=None):
-        if not get_counter:
-            self.new_exp_counter += 1
-        else:
-            return self.new_exp_counter
-
-    def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
-        if not get_counter:
-            self.add_bench_counter += 1
-        else:
-            return self.add_bench_counter
-
-    def close_experiment(self, experiment, get_counter=None):
-        if get_counter:
-            return [self.close_experiment_1_counter,
-                    self.close_experiment_2_counter]
-        if experiment == 'VTC_base_single_vm_wait_1':
-            self.close_experiment_1_counter += 1
-        if experiment == 'VTC_base_single_vm_wait_2':
-            self.close_experiment_2_counter += 1
-
-    def generate_result_csv_file(self, get_counter=None):
-        if get_counter:
-            return self.generate_csv_counter
-        else:
-            self.generate_csv_counter += 1
-
-    def add_metadata(self, experiment_name, metadata):
-        pass
-
-    def add_configuration(self, experiment_name, configuration):
-        pass
-
-    def add_data_points(self, experiment_name, benchmark_name, result):
-        pass
+class DummyDataManager(DataManager):
+
+    def __init__(self, experiment_directory):
+        self.experiment_directory = experiment_directory
+        self.experiments = dict()
+        self.new_exp_counter = 0
+        self.add_bench_counter = 0
+        self.close_experiment_1_counter = 0
+        self.close_experiment_2_counter = 0
+        self.generate_csv_counter = 0
+
+    def create_new_experiment(self, experiment_name, get_counter=None):
+        if not get_counter:
+            self.new_exp_counter += 1
+        else:
+            return self.new_exp_counter
+
+    def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
+        if not get_counter:
+            self.add_bench_counter += 1
+        else:
+            return self.add_bench_counter
+
+    def close_experiment(self, experiment, get_counter=None):
+        if get_counter:
+            return [self.close_experiment_1_counter,
+                    self.close_experiment_2_counter]
+        if experiment == 'VTC_base_single_vm_wait_1':
+            self.close_experiment_1_counter += 1
+        if experiment == 'VTC_base_single_vm_wait_2':
+            self.close_experiment_2_counter += 1
+
+    def generate_result_csv_file(self, get_counter=None):
+        if get_counter:
+            return self.generate_csv_counter
+        else:
+            self.generate_csv_counter += 1
+
+    def add_metadata(self, experiment_name, metadata):
+        pass
+
+    def add_configuration(self, experiment_name, configuration):
+        pass
+
+    def add_data_points(self, experiment_name, benchmark_name, result):
+        pass
 
 
 class Dummy_2544(RFC2544ThroughputBenchmark):
@@ -122,12 +122,13 @@ class TestBenchmarkingUnit(unittest.TestCase):
 
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.heat.'
                 'get_all_heat_templates')
-    def test___init__(self, mock_heat, mock_dep_unit, mock_data_manager,
+    def test___init__(self, mock_heat, mock_dep_unit,
+                      mock_data_manager,
                       mock_temp_dir, mock_time):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
@@ -152,7 +153,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
                               benchmarks)
         self.assertEqual(bu.required_benchmarks, benchmarks)
         bu.heat_template_parameters = heat_template_parameters
-        mock_data_manager.assert_called_once_with('tests/data/results/12345')
+        mock_data_manager.assert_called_once_with('tests/data/results/12345')
         mock_dep_unit.assert_called_once_with(openstack_credentials)
         mock_heat.assert_called_once_with('tests/data/results/', '.ext')
 
@@ -160,13 +161,14 @@ class TestBenchmarkingUnit(unittest.TestCase):
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_initialize_for_success(self, mock_heat, mock_dep_unit,
-                                    mock_data_manager, mock_temp_dir,
+                                    mock_data_manager,
+                                    mock_temp_dir,
                                     mock_time, mock_rfc2544):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
@@ -204,21 +206,22 @@ class TestBenchmarkingUnit(unittest.TestCase):
         self.assertTrue(len(bu.benchmarks) == 1)
         self.assertEqual(bu.benchmarks[0].__class__,
                          Dummy_2544)
-        self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
-        self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
+        self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
+        self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
 
     @mock.patch('experimental_framework.benchmarks.'
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_finalize_for_success(
-            self, mock_heat, mock_dep_unit, mock_data_manager, mock_temp_dir,
-            mock_time, mock_rfc2544):
+            self, mock_heat, mock_dep_unit,
+            mock_data_manager,
+            mock_temp_dir, mock_time, mock_rfc2544):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
         mock_temp_dir.return_value = 'tests/data/test_templates/'
@@ -252,7 +255,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
                              'VTC_base_single_vm_wait_2.yaml']
         bu.finalize()
         # self.assertEqual(bu.data_manager.close_experiment('', True), [1, 1])
-        self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
+        self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
 
     @mock.patch('experimental_framework.common.push_data_influxdb')
     @mock.patch('experimental_framework.common.LOG')
@@ -260,14 +263,15 @@ class TestBenchmarkingUnit(unittest.TestCase):
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_run_benchmarks_for_success(self, mock_heat, mock_common_dep_unit,
-                                        mock_dep_unit, mock_data_manager,
+                                        mock_dep_unit,
+                                        mock_data_manager,
                                         mock_temp_dir, mock_time,
                                         mock_rfc2544, mock_log, mock_influx):
         mock_heat.return_value = list()
@@ -301,7 +305,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
                               heat_template_parameters,
                               iterations,
                               benchmarks)
-        bu.data_manager = DummyDataManager('tests/data/results/12345')
+        bu.data_manager = DummyDataManager('tests/data/results/12345')
         bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
                              'VTC_base_single_vm_wait_2.yaml']
         bu.benchmarks = [Dummy_2544('dummy', {'param1': 'val1'})]
@@ -320,15 +324,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_run_benchmarks_2_for_success(
             self, mock_heat, mock_common_dep_unit, mock_dep_unit,
-            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_data_manager,
+            mock_temp_dir, mock_time, mock_rfc2544,
             mock_log):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
@@ -358,7 +363,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
                               heat_template_parameters,
                               iterations,
                               benchmarks)
-        bu.data_manager = DummyDataManager('tests/data/results/12345')
+        bu.data_manager = DummyDataManager('tests/data/results/12345')
         bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
                              'VTC_base_single_vm_wait_2.yaml']
         bu.benchmarks = [Dummy_2544('dummy', dict())]
@@ -373,15 +378,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_get_benchmark_name_for_success(
             self, mock_heat, mock_common_dep_unit, mock_dep_unit,
-            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_data_manager,
+            mock_temp_dir, mock_time, mock_rfc2544,
             mock_log):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
@@ -427,15 +433,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
                 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
     @mock.patch('time.time')
     @mock.patch('experimental_framework.common.get_template_dir')
-    @mock.patch('experimental_framework.data_manager.DataManager',
-                side_effect=DummyDataManager)
+    @mock.patch('experimental_framework.data_manager.DataManager',
+                side_effect=DummyDataManager)
     @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
     @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
     @mock.patch('experimental_framework.benchmarking_unit.'
                 'heat.get_all_heat_templates')
     def test_get_required_benchmarks_for_success(
             self, mock_heat, mock_common_dep_unit, mock_dep_unit,
-            mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+            mock_data_manager,
+            mock_temp_dir, mock_time, mock_rfc2544,
             mock_log):
         mock_heat.return_value = list()
         mock_time.return_value = '12345'
diff --git a/yardstick/vTC/apexlake/tests/experiment_test.py b/yardstick/vTC/apexlake/tests/experiment_test.py
deleted file mode 100644 (file)
index 47d1fbb..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'gpetralx'
-
-import unittest
-from experimental_framework import data_manager
-
-
-class TestExperiment(unittest.TestCase):
-    def setUp(self):
-        self.exp = data_manager.Experiment('experiment_1')
-
-    def tearDown(self):
-        pass
-
-    def test_add_experiment_metadata(self):
-        with self.assertRaises(ValueError):
-            self.exp.add_experiment_metadata('metadata')
-
-        metadata = {
-            'item_1': 'value_1',
-            'item_2': 'value_2',
-            'item_3': 'value_3'
-        }
-        self.exp.add_experiment_metadata(metadata)
-        self.assertDictEqual(metadata, self.exp._metadata)
-        self.assertDictEqual(metadata, self.exp.get_metadata())
-
-    def test_experiment_configuration(self):
-        with self.assertRaises(ValueError):
-            self.exp.add_experiment_configuration('configuration')
-        configuration = {
-            'item_1': 'value_1',
-            'item_2': 'value_2',
-            'item_3': 'value_3'
-        }
-        self.exp.add_experiment_configuration(configuration)
-        self.assertDictEqual(configuration, self.exp._configuration)
-        self.assertDictEqual(configuration, self.exp.get_configuration())
-
-    def test_add_benchmark(self):
-        with self.assertRaises(ValueError):
-            self.exp.add_benchmark(1)
-        self.exp.add_benchmark('benchmark_1')
-        self.assertListEqual(list(), self.exp._benchmarks['benchmark_1'])
-
-    def test_add_datapoint(self):
-        with self.assertRaises(ValueError):
-            self.exp.add_data_point('benchmark_1', 'datapoint')
-
-        data_point_1 = {
-            'point_1': 'value_1',
-            'point_2': 'value_2',
-            'point_3': 'value_3'
-        }
-
-        with self.assertRaises(ValueError):
-            self.exp.add_data_point('benchmark_1', data_point_1)
-
-        self.exp.add_benchmark('benchmark_1')
-        self.exp.add_data_point('benchmark_1', data_point_1)
-        self.assertListEqual([data_point_1],
-                             self.exp._benchmarks['benchmark_1'])
-
-    def test_get_data_points(self):
-        self.assertListEqual(list(), self.exp.get_data_points('benchmark_1'))
-        data_point_1 = {
-            'point_1': 'value_1',
-            'point_2': 'value_2',
-            'point_3': 'value_3'
-        }
-        self.exp.add_benchmark('benchmark_1')
-        self.exp.add_data_point('benchmark_1', data_point_1)
-        self.assertListEqual([data_point_1],
-                             self.exp.get_data_points('benchmark_1'))
-
-    def test_get_benchmarks(self):
-        self.exp.add_benchmark('benchmark_1')
-        self.exp.add_benchmark('benchmark_2')
-        self.exp.add_benchmark('benchmark_3')
-        expected = ['benchmark_3', 'benchmark_2', 'benchmark_1']
-        self.assertListEqual(expected, self.exp.get_benchmarks())
index cdcce37..4630357 100644 (file)
@@ -78,14 +78,14 @@ class InstantiationValidationInitTest(unittest.TestCase):
         expected['allowed_values'][mut.NUMBER_OF_CORES] = \
             ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
         expected['allowed_values'][mut.AMOUNT_OF_RAM] = \
-            ['250M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
+            ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
              '10G']
         expected['default_values']['throughput'] = '1'
         expected['default_values']['vlan_sender'] = '-1'
         expected['default_values']['vlan_receiver'] = '-1'
         expected['default_values'][mut.NUM_OF_NEIGHBORS] = '1'
         expected['default_values'][mut.NUMBER_OF_CORES] = '1'
-        expected['default_values'][mut.AMOUNT_OF_RAM] = '250M'
+        expected['default_values'][mut.AMOUNT_OF_RAM] = '256M'
         output = self.iv.get_features()
         self.assertEqual(expected['description'], output['description'])