# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The Benchmarking Unit manages the benchmarking of VNFs, orchestrating their
initialization, execution and finalization.
"""

import inspect
import json
import time

from experimental_framework.benchmarks import benchmark_base_class as base
from experimental_framework import common
from experimental_framework import data_manager as data
from experimental_framework import heat_template_generation as heat
from experimental_framework import deployment_unit as deploy


class BenchmarkingUnit:
    """
    Management of the overall benchmarking process.
    """

    def __init__(self, heat_template_name, openstack_credentials,
                 heat_template_parameters, iterations, benchmarks):
        """
        :param heat_template_name: (str) Name of the heat template.

        :param openstack_credentials: (dict) Credentials for OpenStack.
                        Required fields are: 'ip_controller', 'heat_url',
                        'user', 'password', 'auth_uri', 'project'.

        :param heat_template_parameters: (dict) Parameters to be given as
                        input to the heat template. Required keys depend on
                        the specific heat template.

        :param iterations: (int) Number of cycles to be executed.

        :param benchmarks: (list[dict]) Benchmarks/test cases to be executed
                        in each cycle. Each entry provides the benchmark
                        'name' and its initialization 'params'.

        :return: None
        """
        # Loads vars from the configuration file
        self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
        self.template_dir = common.get_template_dir()
        self.results_directory = str(common.RESULT_DIR) + str(time.time())

        # Initializes the other internal variables from the parameters
        self.template_name = heat_template_name
        self.iterations = iterations
        self.required_benchmarks = benchmarks
        self.template_files = []
        self.benchmarks = list()
        self.benchmark_names = list()
        self.data_manager = data.DataManager(self.results_directory)
        self.heat_template_parameters = heat_template_parameters
        self.template_files = \
            heat.get_all_heat_templates(self.template_dir,
                                        self.template_file_extension)
        common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)
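
    # Typical lifecycle of this class, as a commented sketch (all values
    # below are hypothetical and only illustrate the expected shapes):
    #
    #     credentials = {'ip_controller': '10.1.1.1', 'heat_url': '...',
    #                    'user': 'admin', 'password': 'secret',
    #                    'auth_uri': '...', 'project': 'demo'}
    #     unit = BenchmarkingUnit(
    #         'my_template', credentials, {'cores': '4'}, 2,
    #         [{'name': 'my_benchmark', 'params': dict()}])
    #     unit.initialize()
    #     unit.run_benchmarks()
    #     unit.finalize()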

    def initialize(self):
        """
        Initializes the environment in order to run the benchmarking.

        :return: None
        """
        for benchmark in self.required_benchmarks:
            benchmark_class = BenchmarkingUnit.get_benchmark_class(
                benchmark['name'])
            # Need to generate a unique name for the benchmark
            # (since there is the possibility to have different
            # instances of the same benchmark)
            self.benchmarks.append(benchmark_class(
                self.get_benchmark_name(benchmark['name']),
                benchmark['params']))

        for template_file_name in self.template_files:
            experiment_name = BenchmarkingUnit.extract_experiment_name(
                template_file_name)
            self.data_manager.create_new_experiment(experiment_name)
            for benchmark in self.benchmarks:
                self.data_manager.add_benchmark(experiment_name,
                                                benchmark.get_name())

    def finalize(self):
        """
        Finalizes the Benchmarking Unit: destroys all the stacks deployed
        by the framework and saves the results to a CSV file.
        """
        self.data_manager.generate_result_csv_file()
        common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()

    def run_benchmarks(self):
        """
        Runs all the requested benchmarks and collects the results.

        :return: None
        """
        common.LOG.info('Run Benchmarking Unit')

        experiment = dict()
        result = dict()
        for iteration in range(0, self.iterations):
            common.LOG.info('Iteration ' + str(iteration))
            for template_file_name in self.template_files:
                experiment_name = BenchmarkingUnit.\
                    extract_experiment_name(template_file_name)
                experiment['experiment_name'] = experiment_name
                configuration = self.\
                    get_experiment_configuration(template_file_name)
                # self.data_manager.add_configuration(experiment_name,
                #                                     configuration)
                for key in configuration.keys():
                    experiment[key] = configuration[key]
                # metadata = dict()
                # metadata['experiment_name'] = experiment_name
                # self.data_manager.add_metadata(experiment_name, metadata)

                # For each benchmark in the cycle the workload is deployed
                for benchmark in self.benchmarks:
                    log_msg = 'Benchmark {} started on {}'.format(
                        benchmark.get_name(), template_file_name)
                    common.LOG.info(log_msg)

                    # Initialization of the benchmark
                    benchmark.init()
                    log_msg = 'Template {} deployment START'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)

                    # Deployment of the workload
                    deployment_success = \
                        common.DEPLOYMENT_UNIT.deploy_heat_template(
                            self.template_dir + template_file_name,
                            experiment_name,
                            self.heat_template_parameters)

                    if deployment_success:
                        log_msg = 'Template {} deployment COMPLETED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                    else:
                        log_msg = 'Template {} deployment FAILED'.format(
                            experiment_name)
                        common.LOG.info(log_msg)
                        continue

                    # Run the benchmark/test case
                    result = benchmark.run()
                    # self.data_manager.add_data_points(experiment_name,
                    #                                   benchmark.get_name(),
                    #                                   result)

                    # Terminate the workload
                    log_msg = 'Destroying deployment for experiment {}'.\
                        format(experiment_name)
                    common.LOG.info(log_msg)
                    common.DEPLOYMENT_UNIT.destroy_heat_template(
                        experiment_name)

                    # Finalize the benchmark
                    benchmark.finalize()
                    log_msg = 'Benchmark {} terminated'.format(
                        benchmark.__class__.__name__)
                    common.LOG.info(log_msg)
                    # self.data_manager.generate_result_csv_file()

                    experiment['benchmark'] = benchmark.get_name()
                    for key in benchmark.get_params():
                        experiment[key] = benchmark.get_params()[key]
                    common.LOG.info('Benchmark Finished')
                    self.data_manager.generate_result_csv_file()
        common.LOG.info('Benchmarking Unit: Experiments completed!')

    def get_experiment_configuration(self, template_file_name):
        """
        Reads and returns the configuration for the given experiment.

        :param template_file_name: (str) Name of the heat template file
                        for which the configuration is requested.

        :return: (dict) Configuration parameters and values
        """
        file_name = "{}{}.json".format(self.template_dir, template_file_name)
        with open(file_name) as json_file:
            configuration = json.load(json_file)
        return configuration
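
    # As a sketch of the naming rule above: a (hypothetical) template file
    # "experiment_1.yaml" in the template directory would be paired with a
    # configuration file "experiment_1.yaml.json", whose content could be:
    #
    #     {
    #         "flavor": "m1.medium",
    #         "deployment": "baremetal"
    #     }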

    def get_benchmark_name(self, name, instance=0):
        """
        Returns the name to be used for the benchmark/test case (TC).
        This is required since each benchmark/TC could be run more than once
        within the same cycle, with different initialization parameters.
        In order to distinguish between them, a unique name is generated.

        :param name: (str) Original name of the benchmark/TC

        :param instance: (int) Number of instances already in the queue for
                        this type of benchmark/TC.

        :return: (str) Name to be assigned to the benchmark/TC
        """
        if name + "_" + str(instance) in self.benchmark_names:
            # Name already taken: retry with the next instance number
            return self.get_benchmark_name(name, instance + 1)
        self.benchmark_names.append(name + "_" + str(instance))
        return name + "_" + str(instance)
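
    # E.g. requesting a name twice for a (hypothetical) benchmark called
    # "my_benchmark" yields "my_benchmark_0" first and "my_benchmark_1" on
    # the second call.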

    @staticmethod
    def extract_experiment_name(template_file_name):
        """
        Generates a unique experiment name for a given template.

        :param template_file_name: (str) File name of the template used
                        during the experiment.

        :return: (str) Experiment name
        """
        strings = template_file_name.split('.')
        return ".".join(strings[:(len(strings) - 1)])
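
    # E.g. a (hypothetical) template file "experiment_1.yaml" maps to the
    # experiment name "experiment_1".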

    @staticmethod
    def get_benchmark_class(complete_module_name):
        """
        Returns the benchmark class included in a given module.

        :param complete_module_name: (str) Complete name of the module as
                        returned by get_available_test_cases.

        :return: Class related to the benchmark/TC present in the requested
                        module.
        """
        strings = complete_module_name.split('.')
        class_name = 'experimental_framework.benchmarks.{}'.format(strings[0])
        pkg = __import__(class_name, globals(), locals(), [], -1)
        module = getattr(getattr(pkg, 'benchmarks'), strings[0])
        members = inspect.getmembers(module)
        # Return the first class in the module that subclasses
        # BenchmarkBaseClass, excluding the base class itself
        for m in members:
            if inspect.isclass(m[1]):
                class_name = m[1]("", dict()).__class__.__name__
                if isinstance(m[1]("", dict()), base.BenchmarkBaseClass) and \
                        not class_name == 'BenchmarkBaseClass':
                    return m[1]
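
    # Sketch of the lookup above, assuming a (hypothetical) module
    # "my_benchmark.py" under experimental_framework/benchmarks that defines
    # a single subclass of BenchmarkBaseClass:
    #
    #     cls = BenchmarkingUnit.get_benchmark_class('my_benchmark')
    #     benchmark = cls('my_benchmark_0', dict())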

    @staticmethod
    def get_required_benchmarks(required_benchmarks):
        """
        Returns instances of the required test cases.

        :param required_benchmarks: (list[str]) Benchmarks to be
                        executed by the experimental framework.

        :return: (list[BenchmarkBaseClass]) Benchmark instances
        """
        benchmarks = list()
        for b in required_benchmarks:
            class_ = BenchmarkingUnit.get_benchmark_class(b)
            instance = class_("", dict())
            benchmarks.append(instance)
        return benchmarks
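
    # E.g. (hypothetical name) get_required_benchmarks(['my_benchmark'])
    # returns a list with one instance of the benchmark class found in the
    # "my_benchmark" module, constructed with an empty name and empty params.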