# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The Benchmarking Unit manages the benchmarking of VNFs, orchestrating the
initialization, execution and finalization of each benchmark.
"""

from __future__ import absolute_import

import inspect
import json
import time

from experimental_framework.benchmarks import benchmark_base_class as base
from experimental_framework import common
# from experimental_framework import data_manager as data
from experimental_framework import heat_template_generation as heat
from experimental_framework import deployment_unit as deploy
from six.moves import range
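

# A minimal usage sketch (illustrative only: the credential values, template
# name and benchmark entry below are hypothetical, not shipped with this
# module):
#
#     credentials = {'ip_controller': '10.2.0.1',
#                    'heat_url': 'http://10.2.0.1:8004/v1',
#                    'user': 'admin', 'password': 'secret',
#                    'auth_uri': 'http://10.2.0.1:5000/v2.0',
#                    'project': 'demo'}
#     unit = BenchmarkingUnit(
#         'template', credentials, {'cores': '2'}, 1,
#         [{'name': 'foo_benchmark', 'params': dict()}])
#     unit.initialize()
#     unit.run_benchmarks()
#     unit.finalize()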


class BenchmarkingUnit:
    """
    Management of the overall benchmarking process
    """

    def __init__(self, heat_template_name, openstack_credentials,
                 heat_template_parameters, iterations, benchmarks):
        """
        :param heat_template_name: (str) Name of the heat template.

        :param openstack_credentials: (dict) Credentials for OpenStack.
                        Required fields are: 'ip_controller', 'heat_url',
                        'user', 'password', 'auth_uri', 'project'.

        :param heat_template_parameters: (dict) Parameters to be given as
                        input to the heat template. Required keys depend on
                        the specific heat template.

        :param iterations: (int) Number of cycles to be executed.

        :param benchmarks: (list[dict]) Benchmarks/test cases to be executed
                        in each cycle. Each entry provides the 'name' of the
                        benchmark module and its 'params' dictionary.

        :return: None
        """
        # Loads vars from configuration file
        self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
        self.template_dir = common.get_template_dir()
        self.results_directory = str(common.RESULT_DIR) + str(time.time())

        # Initializes other internal variables from parameters
        self.template_name = heat_template_name
        self.iterations = iterations
        self.required_benchmarks = benchmarks
        self.template_files = []
        self.benchmarks = list()
        self.benchmark_names = list()
        # self.data_manager = data.DataManager(self.results_directory)
        self.heat_template_parameters = heat_template_parameters
        self.template_files = \
            heat.get_all_heat_templates(self.template_dir,
                                        self.template_file_extension)
        common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)
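
        # Note: the deployment unit is stored on the shared 'common' module
        # so that run_benchmarks() and finalize() below (and any other
        # component importing 'common') operate on the same instance.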

    def initialize(self):
        """
        Initialize the environment in order to run the benchmarking.

        :return: None
        """
        for benchmark in self.required_benchmarks:
            benchmark_class = BenchmarkingUnit.get_benchmark_class(
                benchmark['name'])
            # Need to generate a unique name for the benchmark
            # (since multiple instances of the same benchmark
            # may be requested)
            self.benchmarks.append(benchmark_class(
                self.get_benchmark_name(benchmark['name']),
                benchmark['params']))

        # for template_file_name in self.template_files:
        #     experiment_name = BenchmarkingUnit.extract_experiment_name(
        #         template_file_name)
        #     self.data_manager.create_new_experiment(experiment_name)
        #     for benchmark in self.benchmarks:
        #         self.data_manager.add_benchmark(experiment_name,
        #                                         benchmark.get_name())

    def finalize(self):
        """
        Finalizes the Benchmarking Unit.
        Destroys all the stacks deployed by the framework and saves the
        results to a CSV file.

        :return: None
        """
        # self.data_manager.generate_result_csv_file()
        common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()

    def run_benchmarks(self):
        """
        Runs all the requested benchmarks and collects the results.

        :return: None
        """
        common.LOG.info('Run Benchmarking Unit')

        experiment = dict()
        result = dict()
        for iteration in range(self.iterations):
            common.LOG.info('Iteration %s', iteration)
            for template_file_name in self.template_files:
                experiment_name = BenchmarkingUnit.\
                    extract_experiment_name(template_file_name)
                experiment['experiment_name'] = experiment_name
                configuration = self.\
                    get_experiment_configuration(template_file_name)
                # self.data_manager.add_configuration(experiment_name,
                #                                     configuration)
                for key in configuration.keys():
                    experiment[key] = configuration[key]
                # metadata = dict()
                # metadata['experiment_name'] = experiment_name
                # self.data_manager.add_metadata(experiment_name, metadata)

                # For each benchmark in the cycle the workload is deployed
                for benchmark in self.benchmarks:
                    log_msg = 'Benchmark {} started on {}'.format(
                        benchmark.get_name(), template_file_name)
                    common.LOG.info(log_msg)

                    # Initialization of the benchmark
                    benchmark.init()
                    log_msg = 'Template {} deployment START'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)

                    # Deployment of the workload
                    deployment_success = \
                        common.DEPLOYMENT_UNIT.deploy_heat_template(
                            self.template_dir + template_file_name,
                            experiment_name,
                            self.heat_template_parameters)

                    if deployment_success:
                        log_msg = 'Template {} deployment COMPLETED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                    else:
                        log_msg = 'Template {} deployment FAILED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                        continue

                    # Running the benchmark/test case
                    result = benchmark.run()
                    # self.data_manager.add_data_points(experiment_name,
                    #                                   benchmark.get_name(),
                    #                                   result)

                    # Terminate the workload
                    log_msg = 'Destroying deployment for experiment {}'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)
                    common.DEPLOYMENT_UNIT.destroy_heat_template(
                        experiment_name)

                    # Finalize the benchmark
                    benchmark.finalize()
                    log_msg = 'Benchmark {} terminated'.format(
                        benchmark.__class__.__name__)
                    common.LOG.info(log_msg)
                    # self.data_manager.generate_result_csv_file()

                    experiment['benchmark'] = benchmark.get_name()
                    for key in benchmark.get_params():
                        experiment[key] = benchmark.get_params()[key]
                common.LOG.info('Benchmark Finished')
                # self.data_manager.generate_result_csv_file()
        common.LOG.info('Benchmarking Unit: Experiments completed!')

    def get_experiment_configuration(self, template_file_name):
        """
        Reads and returns the configuration for the specific experiment
        (heat template).

        :param template_file_name: (str) Name of the heat template file
                        for which the configuration is requested.

        :return: dict() Configuration parameters and values
        """
        file_name = "{}{}.json".format(self.template_dir, template_file_name)
        with open(file_name) as json_file:
            configuration = json.load(json_file)
        return configuration
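
    # The companion configuration file is expected to live next to the
    # template. For a (hypothetical) generated template 'experiment_1.yaml',
    # the file read here would be 'experiment_1.yaml.json', holding a flat
    # JSON object of parameter/value pairs, e.g.:
    #     {"vnic_type": "normal", "vtc_flavor": "m1.small"}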

    def get_benchmark_name(self, name, instance=0):
        """
        Returns the name to be used for the benchmark/test case (TC).
        This is required since each benchmark/TC could be run more than once
        within the same cycle, with different initialization parameters.
        In order to distinguish between them, a unique name is generated.

        :param name: (str) Original name of the benchmark/TC

        :param instance: (int) Number of instances of this type of
                        benchmark/TC already in the queue.

        :return: (str) Name to be assigned to the benchmark/TC
        """
        if name + "_" + str(instance) in self.benchmark_names:
            instance += 1
            return self.get_benchmark_name(name, instance)
        self.benchmark_names.append(name + "_" + str(instance))
        return name + "_" + str(instance)
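
    # Illustrative naming scheme (benchmark name hypothetical): the first
    # request for 'foo_benchmark' returns 'foo_benchmark_0'; a second
    # request for the same name returns 'foo_benchmark_1'.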

    @staticmethod
    def extract_experiment_name(template_file_name):
        """
        Generates a unique experiment name for a given template.

        :param template_file_name: (str) File name of the template used
                        during the experiment.

        :return: (str) Experiment name
        """
        strings = template_file_name.split('.')
        return ".".join(strings[:(len(strings) - 1)])
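
    # Example: for a (hypothetical) file name 'experiment_1.yaml' this
    # returns 'experiment_1'; only the extension after the last '.' is
    # dropped, so 'a.b.yaml' would yield 'a.b'.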

    @staticmethod
    def get_benchmark_class(complete_module_name):
        """
        Returns the benchmark class defined in a given module.

        :param complete_module_name: (str) Complete name of the module as
                        returned by get_available_test_cases.

        :return: Class related to the benchmark/TC present in the requested
                        module.
        """
        strings = complete_module_name.split('.')
        class_name = 'experimental_framework.benchmarks.{}'.format(strings[0])
        pkg = __import__(class_name, globals(), locals(), [], 0)
        module = getattr(getattr(pkg, 'benchmarks'), strings[0])
        members = inspect.getmembers(module)
        for m in members:
            if inspect.isclass(m[1]):
                class_name = m[1]("", dict()).__class__.__name__
                if isinstance(m[1]("", dict()), base.BenchmarkBaseClass) and \
                        not class_name == 'BenchmarkBaseClass':
                    return m[1]
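
    # Illustrative behaviour (names hypothetical): given
    # 'foo_benchmark.FooBenchmark', this imports
    # experimental_framework.benchmarks.foo_benchmark and returns the first
    # class found there that subclasses BenchmarkBaseClass, skipping the
    # base class itself.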

    @staticmethod
    def get_required_benchmarks(required_benchmarks):
        """
        Returns instances of the required test cases.

        :param required_benchmarks: (list[str]) Names of the benchmarks to
                        be executed by the experimental framework.

        :return: (list) Instances of BenchmarkBaseClass subclasses
        """
        benchmarks = list()
        for b in required_benchmarks:
            class_ = BenchmarkingUnit.get_benchmark_class(b)
            instance = class_("", dict())
            benchmarks.append(instance)
        return benchmarks