Remove data manager from ApexLake
yardstick.git: yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''
The Benchmarking Unit manages the benchmarking of VNFs, orchestrating the
initialization, execution and finalization.
'''


import json
import time
import inspect

from experimental_framework.benchmarks import benchmark_base_class as base
from experimental_framework import common
# from experimental_framework import data_manager as data
from experimental_framework import heat_template_generation as heat
from experimental_framework import deployment_unit as deploy


class BenchmarkingUnit:
    """
    Management of the overall Benchmarking process
    """

    def __init__(self, heat_template_name, openstack_credentials,
                 heat_template_parameters, iterations, benchmarks):
        """
        :param heat_template_name: (str) Name of the heat template.

        :param openstack_credentials: (dict) Credentials for OpenStack.
                        Required fields are: 'ip_controller', 'heat_url',
                        'user', 'password', 'auth_uri', 'project'.

        :param heat_template_parameters: (dict) Parameters to be given as
                        input to the heat template. Required keys depend on
                        the specific heat template.

        :param iterations: (int) Number of cycles to be executed.

        :param benchmarks: (list[dict]) Benchmarks/test_cases to be executed
                        in each cycle. Each element is a dict with the keys
                        'name' and 'params'.

        :return: None
        """
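        # Illustrative shape of the inputs (values are hypothetical, not
        # taken from this repository):
        #   openstack_credentials = {'ip_controller': '...', 'heat_url': '...',
        #                            'user': '...', 'password': '...',
        #                            'auth_uri': '...', 'project': '...'}
        #   benchmarks = [{'name': '<benchmark_module>.<BenchmarkClass>',
        #                  'params': dict()}]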
        # Loads vars from configuration file
        self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
        self.template_dir = common.get_template_dir()
        self.results_directory = str(common.RESULT_DIR) + str(time.time())

        # Initializes other internal variables from parameters
        self.template_name = heat_template_name
        self.iterations = iterations
        self.required_benchmarks = benchmarks
        self.template_files = []
        self.benchmarks = list()
        self.benchmark_names = list()
        # self.data_manager = data.DataManager(self.results_directory)
        self.heat_template_parameters = heat_template_parameters
        self.template_files = \
            heat.get_all_heat_templates(self.template_dir,
                                        self.template_file_extension)
        common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)

    def initialize(self):
        """
        Initializes the environment in order to run the benchmarking.

        :return: None
        """
        for benchmark in self.required_benchmarks:
            benchmark_class = BenchmarkingUnit.get_benchmark_class(
                benchmark['name'])
            # Need to generate a unique name for the benchmark
            # (since there is the possibility to have different
            # instances of the same benchmark)
            self.benchmarks.append(benchmark_class(
                self.get_benchmark_name(benchmark['name']),
                benchmark['params']))

        # for template_file_name in self.template_files:
        #     experiment_name = BenchmarkingUnit.extract_experiment_name(
        #         template_file_name)
            # self.data_manager.create_new_experiment(experiment_name)
            # for benchmark in self.benchmarks:
            #     self.data_manager.add_benchmark(experiment_name,
            #                                    benchmark.get_name())

    def finalize(self):
        """
        Finalizes the Benchmarking Unit and destroys all the stacks deployed
        by the framework.

        :return: None
        """
        # self.data_manager.generate_result_csv_file()
        common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()

    def run_benchmarks(self):
        """
        Runs all the requested benchmarks and collects the results.

        :return: (dict) Results of the last benchmark/test case executed.
        """
        common.LOG.info('Run Benchmarking Unit')

        experiment = dict()
        result = dict()
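        # 'experiment' accumulates the configuration and benchmark parameters
        # of the run currently being executed; 'result' keeps the output of
        # the most recent benchmark.run() call and is returned to the caller
        # once all iterations are completed.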
        for iteration in range(0, self.iterations):
            common.LOG.info('Iteration ' + str(iteration))
            for template_file_name in self.template_files:
                experiment_name = BenchmarkingUnit.\
                    extract_experiment_name(template_file_name)
                experiment['experiment_name'] = experiment_name
                configuration = self.\
                    get_experiment_configuration(template_file_name)
                # self.data_manager.add_configuration(experiment_name,
                #                                     configuration)
                for key in configuration.keys():
                    experiment[key] = configuration[key]
                # metadata = dict()
                # metadata['experiment_name'] = experiment_name
                # self.data_manager.add_metadata(experiment_name, metadata)

                # For each benchmark in the cycle the workload is deployed
                for benchmark in self.benchmarks:
                    log_msg = 'Benchmark {} started on {}'.format(
                        benchmark.get_name(), template_file_name
                    )
                    common.LOG.info(log_msg)

                    # Initialization of Benchmark
                    benchmark.init()
                    log_msg = 'Template {} deployment START'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)

                    # Deployment of the workload
                    deployment_success = \
                        common.DEPLOYMENT_UNIT.deploy_heat_template(
                            self.template_dir + template_file_name,
                            experiment_name,
                            self.heat_template_parameters)

                    if deployment_success:
                        log_msg = 'Template {} deployment COMPLETED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                    else:
                        log_msg = 'Template {} deployment FAILED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                        continue

                    # Running the Benchmark/test case
                    result = benchmark.run()
                    # self.data_manager.add_data_points(experiment_name,
                    #                                   benchmark.get_name(),
                    #                                   result)

                    # Terminate the workload
                    log_msg = 'Destroying deployment for experiment {}'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)
                    common.DEPLOYMENT_UNIT.destroy_heat_template(
                        experiment_name)

                    # Finalize the benchmark
                    benchmark.finalize()
                    log_msg = 'Benchmark {} terminated'.format(
                        benchmark.__class__.__name__)
                    common.LOG.info(log_msg)
                    # self.data_manager.generate_result_csv_file()

                    experiment['benchmark'] = benchmark.get_name()
                    for key in benchmark.get_params():
                        experiment[key] = benchmark.get_params()[key]
                common.LOG.info('Benchmark Finished')
                # self.data_manager.generate_result_csv_file()
        common.LOG.info('Benchmarking Unit: Experiments completed!')
        return result

    def get_experiment_configuration(self, template_file_name):
        """
        Reads and returns the configuration for the specific experiment
        (heat template).

        :param template_file_name: (str) Name of the heat template file for
                        which the configuration is requested.

        :return: dict() Configuration parameters and values
        """
        file_name = "{}{}.json".format(self.template_dir, template_file_name)
        with open(file_name) as json_file:
            configuration = json.load(json_file)
        return configuration

    def get_benchmark_name(self, name, instance=0):
        """
        Returns the name to be used for the benchmark/test case (TC).
        This is required since each benchmark/TC could be run more than once
        within the same cycle, with different initialization parameters.
        In order to distinguish between them, a unique name is generated.

        :param name: (str) original name of the benchmark/TC

        :param instance: (int) number of instances already in the queue for
                        this type of benchmark/TC.

        :return: (str) name to be assigned to the benchmark/TC
        """
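        # Example: the first request for the name 'foo' returns 'foo_0', a
        # second request for the same name returns 'foo_1', and so on
        # ('foo' is just an illustrative benchmark name).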
        if name + "_" + str(instance) in self.benchmark_names:
            instance += 1
            return self.get_benchmark_name(name, instance)
        self.benchmark_names.append(name + "_" + str(instance))
        return name + "_" + str(instance)

    @staticmethod
    def extract_experiment_name(template_file_name):
        """
        Generates a unique experiment name for a given template.

        :param template_file_name: (str) File name of the template used
                        during the experiment.

        :return: (str) Experiment Name
        """
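        # The experiment name is the template file name without its final
        # extension, e.g. 'experiment_1.yaml' -> 'experiment_1' (the file
        # name used here is illustrative).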
        strings = template_file_name.split('.')
        return ".".join(strings[:-1])

    @staticmethod
    def get_benchmark_class(complete_module_name):
        """
        Returns the benchmark class included in a given module.

        :param complete_module_name: (str) Complete name of the module as
                        returned by get_available_test_cases.

        :return: Class related to the benchmark/TC present in the requested
                        module.
        """
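        # The module name is assumed to be provided as
        # '<module_file_name>.<ClassName>'; only the part before the first
        # dot is used to locate the module, and the benchmark class is then
        # discovered by introspection.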
        strings = complete_module_name.split('.')
        class_name = 'experimental_framework.benchmarks.{}'.format(strings[0])
        pkg = __import__(class_name, globals(), locals(), [], -1)
        module = getattr(getattr(pkg, 'benchmarks'), strings[0])
        members = inspect.getmembers(module)
        for m in members:
            if inspect.isclass(m[1]):
                class_name = m[1]("", dict()).__class__.__name__
                if isinstance(m[1]("", dict()), base.BenchmarkBaseClass) and \
                        class_name != 'BenchmarkBaseClass':
                    return m[1]

    @staticmethod
    def get_required_benchmarks(required_benchmarks):
        """
        Returns instances of required test cases.

        :param required_benchmarks: (list() of strings) Benchmarks to be
                        executed by the experimental framework.

        :return: list() of BenchmarkBaseClass
        """
        benchmarks = list()
        for b in required_benchmarks:
            class_ = BenchmarkingUnit.get_benchmark_class(b)
            instance = class_("", dict())
            benchmarks.append(instance)
        return benchmarks
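

# Illustrative usage sketch (not part of the original module). All values
# below are hypothetical placeholders; a real run additionally assumes that
# the framework configuration in experimental_framework.common has already
# been initialized and that the referenced heat template and benchmark
# module actually exist.
if __name__ == '__main__':
    credentials = {'ip_controller': '10.0.0.1',
                   'heat_url': 'http://10.0.0.1:8004/v1',
                   'user': 'admin',
                   'password': 'secret',
                   'auth_uri': 'http://10.0.0.1:5000/v2.0',
                   'project': 'demo'}
    # 'my_benchmark.MyBenchmark' is a placeholder for an existing benchmark
    # module/class under experimental_framework/benchmarks/
    benchmarks_to_run = [{'name': 'my_benchmark.MyBenchmark',
                          'params': dict()}]
    unit = BenchmarkingUnit('experiment',   # heat template base name
                            credentials,
                            dict(),         # heat template parameters
                            1,              # iterations
                            benchmarks_to_run)
    unit.initialize()
    results = unit.run_benchmarks()
    unit.finalize()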