# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17 from itertools import chain
26 from yardstick.benchmark.contexts import base as context_base
27 from yardstick.benchmark.scenarios import base as scenario_base
28 from yardstick.common.constants import LOG_DIR
29 from yardstick.common import exceptions
30 from yardstick.common.process import terminate_children
31 from yardstick.common import utils
32 from yardstick.network_services.collector.subscriber import Collector
33 from yardstick.network_services.vnf_generic import vnfdgen
34 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
35 from yardstick.network_services import traffic_profile
36 from yardstick.network_services.traffic_profile import base as tprofile_base
37 from yardstick.network_services.utils import get_nsb_option
38 from yardstick import ssh
# Register the bundled traffic-profile modules up front — presumably so
# tprofile_base.TrafficProfile.get() (used by _fill_traffic_profile below)
# can resolve profile classes by name; confirm against the
# traffic_profile package.
traffic_profile.register_modules()

# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework for pre-deployment VNF & network service testing.

    Drives the NSPerf scenario: renders the topology template, maps it
    onto the infrastructure described in ``context_cfg``, instantiates the
    VNFs and traffic generators, and collects KPIs while traffic runs.
    """

    # Scenario type string used by the Yardstick scenario dispatcher to
    # select this class from a test-case YAML file.
    __scenario_type__ = "NSPerf"
    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        """Initialise the scenario.

        :param scenario_cfg: scenario configuration (options, task_path,
            traffic_profile, topology, task_id, ...)
        :param context_cfg: context configuration ('nodes', 'networks', ...)
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # Render the topology template immediately so self.topology is
        # available to the mapping/resolution steps later on.
        self._render_topology()
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        # NOTE(review): this listing elides some lines here; the following
        # 'return' appears to be the body of a separate is_ended()-style
        # method whose 'def' line is missing from this view — confirm the
        # real layout against the complete source.
        return self.traffic_profile is not None and self.traffic_profile.is_ended()
    def _get_ip_flow_range(self, ip_start_range):
        """Retrieve a CIDR first and last viable IPs

        :param ip_start_range: could be the IP range itself or a dictionary
        with the host name and the port.
        :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
        """
        # NOTE(review): several statements of this method are elided from
        # this listing (the early return for the plain-string case, the
        # branch/try structure around the interface lookup, and the final
        # return) — confirm each marked gap against the complete source.
        # A plain string is presumably already an explicit range; the body
        # of this 'if' (likely 'return ip_start_range') is elided.
        if isinstance(ip_start_range, six.string_types):
        # Otherwise expect a (single-entry) mapping of node name to a range
        # or interface name; the default tuple argument is elided.
        node_name, range_or_interface = next(iter(ip_start_range.items()),
            return range_or_interface
        node = self.context_cfg['nodes'].get(node_name, {})
        interface = node.get('interfaces', {}).get(range_or_interface)
        # Derive the usable range from the interface's address and netmask.
            ip = interface['local_ip']
            mask = interface['netmask']
            # Fallback netmask — the condition that triggers it is elided;
            # presumably used when the interface has no netmask. Confirm.
            mask = '255.255.255.0'
        # strict=False tolerates host bits set in the base address.
        ipaddr = ipaddress.ip_network(
            six.text_type('{}/{}'.format(ip, mask)), strict=False)
        # Only networks with at least two spare host bits yield a usable
        # [2]..[-2] range; otherwise warn that a single IP is available.
        if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
            ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
            LOG.warning('Only single IP in range %s', ipaddr)
    def _get_traffic_flow(self):
        """Build the 'flow' section of the traffic-profile render context.

        Expands the scenario's options.flow lists (src_ip, dst_ip,
        public_ip, src_port, dst_port) into indexed keys, plus the
        mandatory 'count' and optional seed values.

        :return: dict of the form {"flow": {...}}
        """
        # NOTE(review): the initialisation of the local 'flow' dict (and
        # any surrounding try/except) is elided from this listing —
        # confirm against the complete source.
        # TODO: should be .0 or .1 so we can use list
        # but this also roughly matches uplink_0, downlink_0
        fflow = self.scenario_cfg["options"]["flow"]
        for index, src in enumerate(fflow.get("src_ip", [])):
            # IP entries may be literal ranges or {node: interface}
            # references; _get_ip_flow_range resolves both.
            flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)
        for index, dst in enumerate(fflow.get("dst_ip", [])):
            flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)
        for index, publicip in enumerate(fflow.get("public_ip", [])):
            flow["public_ip_{}".format(index)] = publicip
        for index, src_port in enumerate(fflow.get("src_port", [])):
            flow["src_port_{}".format(index)] = src_port
        for index, dst_port in enumerate(fflow.get("dst_port", [])):
            flow["dst_port_{}".format(index)] = dst_port
        # 'count' is accessed directly, so it is mandatory when a 'flow'
        # option is present; the seeds below are optional.
        flow["count"] = fflow["count"]
        if "srcseed" in fflow:
            flow["srcseed"] = fflow["srcseed"]
        if "dstseed" in fflow:
            flow["dstseed"] = fflow["dstseed"]
        return {"flow": flow}
    def _get_traffic_imix(self):
        """Build the 'imix' section from the scenario's framesize option."""
        # NOTE(review): the return statement (and any KeyError fallback for
        # a missing 'framesize' option) is elided from this listing.
        imix = {"imix": self.scenario_cfg['options']['framesize']}
    def _get_traffic_profile(self):
        """Open the traffic-profile file referenced by the scenario.

        The profile path is resolved relative to the task path.
        """
        # NOTE(review): the body of the 'with' block (presumably returning
        # the file contents) is elided from this listing.
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
149 def _get_duration(self):
150 options = self.scenario_cfg.get('options', {})
151 return options.get('duration',
152 tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
    def _key_list_to_dict(self, key, value_list):
        """Expand value_list[key] into {'<key>_<index>': item} entries.

        :param key: name of the list to expand (e.g. 'uplink')
        :param value_list: mapping that may contain *key*
        """
        # NOTE(review): the initialisation of 'value_dict' (and any
        # KeyError handling plus the return statement) is elided from this
        # listing — confirm against the complete source.
        for index, count in enumerate(value_list[key]):
            value_dict["{}_{}".format(key, index)] = count
164 def _get_simulated_users(self):
165 users = self.scenario_cfg.get("options", {}).get("simulated_users", {})
166 simulated_users = self._key_list_to_dict("uplink", users)
167 return {"simulated_users": simulated_users}
169 def _get_page_object(self):
170 objects = self.scenario_cfg.get("options", {}).get("page_object", {})
171 page_object = self._key_list_to_dict("uplink", objects)
172 return {"page_object": page_object}
    def _fill_traffic_profile(self):
        """Render the traffic-profile template and instantiate the profile.

        Renders the profile file with flow/imix/duration/... data, overlays
        any scenario-level 'traffic_config' option, and stores the result
        in self.traffic_profile.
        """
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        # NOTE(review): the opening of the render-context dict assignment
        # (presumably 'tprofile_data = {') is elided from this listing —
        # confirm against the complete source.
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration(),
            'page_object': self._get_page_object(),
            'simulated_users': self._get_simulated_users()}
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        # NOTE(review): the assignment target for the expression below
        # (presumably 'traffic_config = \') is elided; 'traffic_config' is
        # used two lines further down.
        self.scenario_cfg.get("options", {}).get("traffic_config", {})
        # Ensure a 'traffic_profile' section exists, then overlay the
        # scenario-level traffic_config on top of the rendered values.
        traffic_vnfd.setdefault("traffic_profile", {})
        traffic_vnfd["traffic_profile"].update(traffic_config)
        self.traffic_profile = \
            tprofile_base.TrafficProfile.get(traffic_vnfd)
    def _get_topology(self):
        """Open the topology file referenced by the scenario.

        The topology path is resolved relative to the task path.
        """
        # NOTE(review): the body of the 'with' block (presumably returning
        # the file contents) is elided from this listing.
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
    def _render_topology(self):
        """Render the topology template and store the parsed NSD.

        Side effect: sets self.topology to the first NSD entry of the
        rendered 'nsd:nsd-catalog'.
        """
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        # NOTE(review): the assignment opening the render-context dict is
        # elided here; the visible 'topolgy_data' reference below suggests
        # the upstream local variable name carries a 'topolgy' typo —
        # confirm (and consider renaming) against the complete source.
            'extra_args': topology_args
        topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
212 def _find_vnf_name_from_id(self, vnf_id): # pragma: no cover
213 return next((vnfd["vnfd-id-ref"]
214 for vnfd in self.topology["constituent-vnfd"]
215 if vnf_id == vnfd["member-vnf-index"]), None)
    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        """Return the full vnfd dict of the constituent VNF with *vnf_id*.

        Counterpart of _find_vnf_name_from_id that yields the whole
        descriptor instead of only its id.
        """
        # NOTE(review): the opening of this expression (presumably
        # 'return next((vnfd') is elided from this listing — confirm.
            for vnfd in self.topology["constituent-vnfd"]
            if vnf_id == vnfd["member-vnf-index"]), None)
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
        """Look up interface *if_name* on node *name*, healing its key.

        If the interface is keyed by *vld_id* (e.g. 'uplink_0') instead of
        its real name (e.g. 'xe0'), it is popped and re-inserted under
        *if_name*.
        NOTE(review): this listing elides the try/except joining the two
        lookups, the final return, and a likely @staticmethod decorator
        (the function takes no 'self' but is called via self.find_node_if)
        — confirm against the complete source.
        """
        intf = nodes[name]["interfaces"][if_name]
        # if not xe0, then maybe vld_id, uplink_0, downlink_0
        # pop it and re-insert with the correct name from topology
        intf = nodes[name]["interfaces"].pop(vld_id)
        nodes[name]["interfaces"][if_name] = intf
    def _resolve_topology(self):
        """Resolve topology VLDs against the context nodes' interfaces.

        For every virtual link (vld) the two endpoint interfaces are looked
        up in context_cfg and annotated with their own name/node/vld id,
        peer name and interface, the attached network, and the peer's
        local MAC/IP as dst_mac/dst_ip.  A second pass attaches a copied
        peer-interface dict to each endpoint.

        :raises exceptions.IncorrectConfig: on malformed endpoint lists or
            (per the visible raise site) missing interfaces.
        """
        # NOTE(review): this listing elides several lines (the 'try:'
        # keywords before the endpoint unpacking, the tails of the error
        # messages, and the KeyError handling around the annotation block)
        # — confirm every marked spot against the complete source.
        for vld in self.topology["vld"]:
            # [elided: try:] each vld must reference exactly two endpoints.
            node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]
            # [elided: try:] locate both endpoint interfaces in the context.
            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])
            # names so we can do reverse lookups
            node0_if["ifname"] = node0_if_name
            node1_if["ifname"] = node1_if_name
            node0_if["node_name"] = node0_name
            node1_if["node_name"] = node1_name
            node0_if["vld_id"] = vld["id"]
            node1_if["vld_id"] = vld["id"]
            # set peer node name on each side
            node0_if["peer_name"] = node1_name
            node1_if["peer_name"] = node0_name
            # set peer interface name
            node0_if["peer_ifname"] = node1_if_name
            node1_if["peer_ifname"] = node0_if_name
            # just load the network
            vld_networks = {n.get('vld_id', name): n for name, n in
                            self.context_cfg["networks"].items()}
            node0_if["network"] = vld_networks.get(vld["id"], {})
            node1_if["network"] = vld_networks.get(vld["id"], {})
            # cross-wire destination MAC/IP from the peer's local values
            node0_if["dst_mac"] = node1_if["local_mac"]
            node0_if["dst_ip"] = node1_if["local_ip"]
            node1_if["dst_mac"] = node0_if["local_mac"]
            node1_if["dst_ip"] = node0_if["local_ip"]
            # [elided: except KeyError around the block above — the raise
            # below appears to be inside that handler]
            raise exceptions.IncorrectConfig(
                error_msg='Required interface not found, topology file '
        # Second pass: attach a copy of the peer interface to each side
        # (copies avoid a circular reference between the two dicts).
        for vld in self.topology['vld']:
            # [elided: try:] as above, unpack the two endpoints.
            node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]
            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])
            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy
316 def _update_context_with_topology(self): # pragma: no cover
317 for vnfd in self.topology["constituent-vnfd"]:
318 vnf_idx = vnfd["member-vnf-index"]
319 vnf_name = self._find_vnf_name_from_id(vnf_idx)
320 vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
321 self.context_cfg["nodes"][vnf_name].update(vnfd)
    def _generate_pod_yaml(self):  # pragma: no cover
        """Dump a pod-<task_id>.yaml snapshot of nodes/networks to LOG_DIR.

        Called after VNF instantiation (see the setup flow) so that probed
        runtime details such as vpci and driver are included.
        """
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        # NOTE(review): the opening of the 'pod_dict' literal (presumably
        # {'nodes': nodes, ...}) and its closing brace are elided from this
        # listing — confirm against the complete source.
            "networks": self.context_cfg["networks"]
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
    def _serialize_node(node):  # pragma: no cover
        """Return a deep copy of *node* made safe for YAML dumping.

        Strips the context suffix from the node name and stringifies the
        SSH pkey.
        NOTE(review): this listing elides a likely @staticmethod decorator
        (the function takes no 'self'), the try/except around the pkey
        conversion, and the final return — confirm upstream.
        """
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
348 def map_topology_to_infrastructure(self):
349 """ This method should verify if the available resources defined in pod.yaml
350 match the topology.yaml file.
352 :return: None. Side effect: context_cfg is updated
354 # 3. Use topology file to find connections & resolve dest address
355 self._resolve_topology()
356 self._update_context_with_topology()
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        :raises exceptions.IncorrectConfig: when no subclass matches
        """
        # NOTE(review): this listing elides a likely @classmethod decorator
        # ('cls' first parameter), the 'classes_found' initialisation, the
        # return of the matching class and the tail of the error message —
        # confirm each against the complete source.
        # Import every VNF implementation module so that itersubclasses()
        # below can actually see all GenericVNF subclasses.
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        for name, class_ in ((c.__name__, c) for c in
                             utils.itersubclasses(GenericVNF)):
            if name == expected_name:
            # Track every candidate name for the error message below.
            classes_found.append(name)
        except StopIteration:
        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        """Populate vnfd's external-interface list from *node* interfaces.

        Only interfaces carrying a 'vld_id' are added, in sorted name order.
        NOTE(review): this listing elides a likely @staticmethod decorator
        (the function takes no 'self'), the try/except around the
        dpdk_port_num coercion and the opening of the 'ext_intf' literal —
        confirm against the complete source.
        """
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with
            # vld_id show up in interfaces, and only interfaces with
            # traffic profiles are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                ext_intfs.append(ext_intf)
    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: defaults to self.scenario_cfg when None
        :param context_cfg: defaults to self.context_cfg when None
        :return: list of instantiated VNF objects (per the visible appends)
        """
        # NOTE(review): this listing elides several lines (the 'vnfs' list
        # initialisation, the KeyError handling for nodes without a
        # 'VNF model', and the final return/assignment) — confirm against
        # the complete source.
        # Put the TRex client library first on sys.path (deduplicated)
        # before any VNF modules are imported.
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg
        if context_cfg is None:
            context_cfg = self.context_cfg
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            # [elided: try/except] nodes without a model are skipped.
            file_name = node["VNF model"]
                LOG.debug("no model for %s, skipping", node_name)
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            # Render the model template with the node's values.
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the template
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)
456 """Setup infrastructure, provission VNFs & start traffic"""
457 # 1. Verify if infrastructure mapping can meet topology
458 self.map_topology_to_infrastructure()
459 # 1a. Load VNF models
460 self.load_vnf_models()
461 # 1b. Fill traffic profile with information from topology
462 self._fill_traffic_profile()
466 # link events will cause VNF application to exit
467 # so we should start traffic runners before VNFs
468 traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
469 non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
471 for vnf in chain(traffic_runners, non_traffic_runners):
472 LOG.info("Instantiating %s", vnf.name)
473 vnf.instantiate(self.scenario_cfg, self.context_cfg)
474 LOG.info("Waiting for %s to instantiate", vnf.name)
475 vnf.wait_for_instantiate()
478 for vnf in self.vnfs:
482 # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
483 self._generate_pod_yaml()
486 # Start listeners first to avoid losing packets
487 for traffic_gen in traffic_runners:
488 traffic_gen.listen_traffic(self.traffic_profile)
490 # register collector with yardstick for KPI collection.
491 self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
492 self.collector.start()
494 # Start the actual traffic
495 for traffic_gen in traffic_runners:
496 LOG.info("Starting traffic on %s", traffic_gen.name)
497 traffic_gen.run_traffic(self.traffic_profile)
498 self._mq_ids.append(traffic_gen.get_mq_producer_id())
    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs"""
        # NOTE(review): the return statement (presumably 'return
        # self._mq_ids', the list filled during traffic start-up) is
        # elided from this listing.
    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
        produces timestamped samples

        :param result: dictionary with results to update
        """
        # NOTE(review): some lines between the docstring and the KPI
        # update are elided from this listing — confirm upstream.
        # this is the only method that is checked by the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate
        result.update(self.collector.get_kpi())
519 """ Stop the collector and terminate VNF & TG instance
526 self.collector.stop()
527 for vnf in self.vnfs:
528 LOG.info("Stopping %s", vnf.name)
530 LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
534 # catch any exception in teardown and convert to simple exception
535 # never pass exceptions back to multiprocessing, because some exceptions can
537 # https://bugs.python.org/issue9400
539 raise RuntimeError("Error in teardown")
541 def pre_run_wait_time(self, time_seconds): # pragma: no cover
542 """Time waited before executing the run method"""
543 time.sleep(time_seconds)
    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited after executing the run method"""
        # NOTE(review): the method body (presumably a time.sleep mirroring
        # pre_run_wait_time) runs past the end of this listing — confirm.