[yardstick.git] yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 """
16 The Benchmarking Unit manages the Benchmarking of VNFs orchestrating the
17 initialization, execution and finalization
18 """


from __future__ import absolute_import
import json
import time
import inspect

from experimental_framework.benchmarks import benchmark_base_class as base
from experimental_framework import common
# from experimental_framework import data_manager as data
from experimental_framework import heat_template_generation as heat
from experimental_framework import deployment_unit as deploy
from six.moves import range


class BenchmarkingUnit:
    """
    Management of the overall Benchmarking process
    """
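
    # Typical lifecycle (illustrative sketch; 'example_benchmark' and the
    # other values are placeholders, not part of the framework):
    #
    #     unit = BenchmarkingUnit('template_base_name', credentials,
    #                             {'param_1': 'value_1'}, 1,
    #                             [{'name': 'example_benchmark',
    #                               'params': dict()}])
    #     unit.initialize()
    #     results = unit.run_benchmarks()
    #     unit.finalize()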

    def __init__(self, heat_template_name, openstack_credentials,
                 heat_template_parameters, iterations, benchmarks):
        """
        :param heat_template_name: (str) Name of the heat template.

        :param openstack_credentials: (dict) OpenStack credentials.
                        Required fields are: 'ip_controller', 'heat_url',
                        'user', 'password', 'auth_uri', 'project'.

        :param heat_template_parameters: (dict) Parameters to be given as
                        input to the heat template. Required keys depend on
                        the specific heat template.

        :param iterations: (int) Number of cycles to be executed.

        :param benchmarks: (list[dict]) Benchmarks/test cases to be executed
                        in each cycle. Each entry carries at least the keys
                        'name' and 'params'.

        :return: None
        """
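        # Example parameter shapes (illustrative placeholders only):
        #   openstack_credentials = {'ip_controller': '...',
        #                            'heat_url': '...', 'user': '...',
        #                            'password': '...', 'auth_uri': '...',
        #                            'project': '...'}
        #   benchmarks = [{'name': 'example_benchmark', 'params': dict()}]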
        # Loads vars from configuration file
        self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
        self.template_dir = common.get_template_dir()
        self.results_directory = str(common.RESULT_DIR) + str(time.time())

        # Initializes other internal variables from parameters
        self.template_name = heat_template_name
        self.iterations = iterations
        self.required_benchmarks = benchmarks
        self.template_files = []
        self.benchmarks = list()
        self.benchmark_names = list()
        # self.data_manager = data.DataManager(self.results_directory)
        self.heat_template_parameters = heat_template_parameters
        self.template_files = \
            heat.get_all_heat_templates(self.template_dir,
                                        self.template_file_extension)
        common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)

    def initialize(self):
        """
        Initializes the environment needed to run the benchmarks.

        :return: None
        """
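        # Each entry of self.required_benchmarks is expected to be a dict
        # with at least the 'name' and 'params' keys: the benchmark class is
        # resolved from 'name' and instantiated with a unique name and the
        # given parameters.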
        for benchmark in self.required_benchmarks:
            benchmark_class = BenchmarkingUnit.get_benchmark_class(
                benchmark['name'])
            # Need to generate a unique name for the benchmark
            # (since there is the possibility to have different
            # instances of the same benchmark)
            self.benchmarks.append(benchmark_class(
                self.get_benchmark_name(benchmark['name']),
                benchmark['params']))

        # for template_file_name in self.template_files:
        #     experiment_name = BenchmarkingUnit.extract_experiment_name(
        #         template_file_name)
            # self.data_manager.create_new_experiment(experiment_name)
            # for benchmark in self.benchmarks:
            #     self.data_manager.add_benchmark(experiment_name,
            #                                    benchmark.get_name())

    def finalize(self):
        """
        Finalizes the Benchmarking Unit.
        Destroys all the stacks deployed by the framework and saves the
        results to a CSV file.

        :return: None
        """
        # self.data_manager.generate_result_csv_file()
        common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()

    def run_benchmarks(self):
        """
        Runs all the requested benchmarks and collects the results.

        :return: Result of the last benchmark/test case executed, as
                        returned by its run() method.
        """
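        # For every iteration and every generated heat template: each
        # benchmark is initialized, the corresponding workload is deployed,
        # the benchmark is run, the deployment is destroyed and the benchmark
        # is finalized. Experiments whose deployment fails are skipped.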
        common.LOG.info('Run Benchmarking Unit')

        experiment = {}
        result = {}
        for iteration in range(self.iterations):
            common.LOG.info('Iteration %s', iteration)
            for template_file_name in self.template_files:
                experiment_name = BenchmarkingUnit.\
                    extract_experiment_name(template_file_name)
                experiment['experiment_name'] = experiment_name
                configuration = self.\
                    get_experiment_configuration(template_file_name)
                # self.data_manager.add_configuration(experiment_name,
                #                                     configuration)
                for key in configuration.keys():
                    experiment[key] = configuration[key]
                # metadata = dict()
                # metadata['experiment_name'] = experiment_name
                # self.data_manager.add_metadata(experiment_name, metadata)

                # For each benchmark in the cycle the workload is deployed
                for benchmark in self.benchmarks:
                    log_msg = 'Benchmark {} started on {}'.format(
                        benchmark.get_name(), template_file_name
                    )
                    common.LOG.info(log_msg)

                    # Initialization of Benchmark
                    benchmark.init()
                    log_msg = 'Template {} deployment START'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)

                    # Deployment of the workload
                    deployment_success = \
                        common.DEPLOYMENT_UNIT.deploy_heat_template(
                            self.template_dir + template_file_name,
                            experiment_name,
                            self.heat_template_parameters)

                    if deployment_success:
                        log_msg = 'Template {} deployment COMPLETED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                    else:
                        log_msg = 'Template {} deployment FAILED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                        continue

                    # Running the Benchmark/test case
                    result = benchmark.run()
                    # self.data_manager.add_data_points(experiment_name,
                    #                                   benchmark.get_name(),
                    #                                   result)

                    # Terminate the workload
                    log_msg = 'Destroying deployment for experiment {}'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)
                    common.DEPLOYMENT_UNIT.destroy_heat_template(
                        experiment_name)

                    # Finalize the benchmark
                    benchmark.finalize()
                    log_msg = 'Benchmark {} terminated'.format(
                        benchmark.__class__.__name__)
                    common.LOG.info(log_msg)
                    # self.data_manager.generate_result_csv_file()

                    experiment['benchmark'] = benchmark.get_name()
                    for key in benchmark.get_params():
                        experiment[key] = benchmark.get_params()[key]
                common.LOG.info('Benchmark Finished')
                # self.data_manager.generate_result_csv_file()
        common.LOG.info('Benchmarking Unit: Experiments completed!')
        return result

    def get_experiment_configuration(self, template_file_name):
        """
        Reads and returns the configuration for the given experiment
        (heat template).

        :param template_file_name: (str) Name of the heat template file for
                        which the configuration is requested.

        :return: (dict) Configuration parameters and values
        """
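        # The configuration file is expected to be a JSON file located in the
        # template directory and named after the template file, i.e.
        # '<template_dir><template_file_name>.json'.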
        file_name = "{}{}.json".format(self.template_dir, template_file_name)
        with open(file_name) as json_file:
            configuration = json.load(json_file)
        return configuration

    def get_benchmark_name(self, name, instance=0):
        """
        Returns the name to be used for the benchmark/test case (TC).
        This is required since each benchmark/TC could be run more than once
        within the same cycle, with different initialization parameters.
        In order to distinguish between them, a unique name is generated.

        :param name: (str) Original name of the benchmark/TC

        :param instance: (int) Number of instances of this type of
                        benchmark/TC already in the queue.

        :return: (str) Name to be assigned to the benchmark/TC
        """
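        # Example (illustrative): the first benchmark named 'example_bench'
        # becomes 'example_bench_0', a second instance 'example_bench_1'.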
        candidate_name = "{}_{}".format(name, instance)
        if candidate_name in self.benchmark_names:
            return self.get_benchmark_name(name, instance + 1)
        self.benchmark_names.append(candidate_name)
        return candidate_name

    @staticmethod
    def extract_experiment_name(template_file_name):
        """
        Generates a unique experiment name for a given template.

        :param template_file_name: (str) File name of the template used
                        during the experiment.

        :return: (str) Experiment name
        """
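        # Example (illustrative): 'experiment_1.yaml' -> 'experiment_1'.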
        strings = template_file_name.split('.')
        return ".".join(strings[:-1])

    @staticmethod
    def get_benchmark_class(complete_module_name):
        """
        Returns the benchmark class contained in the given module.

        :param complete_module_name: (str) Complete name of the module as
                        returned by get_available_test_cases.

        :return: Class of the benchmark/TC contained in the requested
                        module.
        """
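        # Example (illustrative, hypothetical module/class names): for
        # 'example_benchmark.ExampleBenchmark' the module
        # 'experimental_framework.benchmarks.example_benchmark' is imported
        # and its class derived from BenchmarkBaseClass is returned.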
        strings = complete_module_name.split('.')
        module_name = 'experimental_framework.benchmarks.{}'.format(
            strings[0])
        pkg = __import__(module_name, globals(), locals(), [], 0)
        module = getattr(getattr(pkg, 'benchmarks'), strings[0])
        members = inspect.getmembers(module)
        for m in members:
            if inspect.isclass(m[1]):
                instance = m[1]("", dict())
                class_name = instance.__class__.__name__
                if isinstance(instance, base.BenchmarkBaseClass) and \
                        class_name != 'BenchmarkBaseClass':
                    return m[1]

    @staticmethod
    def get_required_benchmarks(required_benchmarks):
        """
        Returns instances of the required test cases.

        :param required_benchmarks: (list[str]) Benchmarks to be executed by
                        the experimental framework.

        :return: (list) Instances of BenchmarkBaseClass
        """
        benchmarks = list()
        for b in required_benchmarks:
            class_ = BenchmarkingUnit.get_benchmark_class(b)
            instance = class_("", dict())
            benchmarks.append(instance)
        return benchmarks