1 # Copyright (c) 2016-2017 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
import copy
import logging
import ipaddress
import os
import sys
import time

from itertools import chain

import six
import yaml

from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
39 traffic_profile.register_modules()
42 LOG = logging.getLogger(__name__)
45 class NetworkServiceTestCase(scenario_base.Scenario):
46 """Class handles Generic framework to do pre-deployment VNF &
47 Network service testing """
49 __scenario_type__ = "NSPerf"
51 def __init__(self, scenario_cfg, context_cfg): # Yardstick API
52 super(NetworkServiceTestCase, self).__init__()
53 self.scenario_cfg = scenario_cfg
54 self.context_cfg = context_cfg
56 self._render_topology()
59 self.traffic_profile = None
60 self.node_netdevs = {}
61 self.bin_path = get_nsb_option('bin_path', '')
63 def _get_ip_flow_range(self, ip_start_range):
65 # IP range is specified as 'x.x.x.x-y.y.y.y'
66 if isinstance(ip_start_range, six.string_types):
69 node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
71 # we are manually specifying the range
72 ip_addr_range = range_or_interface
74 node = self.context_cfg["nodes"].get(node_name, {})
76 # the ip_range is the interface name
77 interface = node.get("interfaces", {})[range_or_interface]
80 mask = "255.255.255.0"
82 ip = interface["local_ip"]
83 # we can't default these values, they must both exist to be valid
84 mask = interface["netmask"]
86 ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
87 hosts = list(ipaddr.hosts())
89 # skip the first host in case of gateway
90 ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
92 LOG.warning("Only single IP in range %s", ipaddr)
93 # fall back to single IP range
97 def _get_traffic_flow(self):
100 # TODO: should be .0 or .1 so we can use list
101 # but this also roughly matches uplink_0, downlink_0
102 fflow = self.scenario_cfg["options"]["flow"]
103 for index, src in enumerate(fflow.get("src_ip", [])):
104 flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)
106 for index, dst in enumerate(fflow.get("dst_ip", [])):
107 flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)
109 for index, publicip in enumerate(fflow.get("public_ip", [])):
110 flow["public_ip_{}".format(index)] = publicip
112 for index, src_port in enumerate(fflow.get("src_port", [])):
113 flow["src_port_{}".format(index)] = src_port
115 for index, dst_port in enumerate(fflow.get("dst_port", [])):
116 flow["dst_port_{}".format(index)] = dst_port
118 flow["count"] = fflow["count"]
121 return {"flow": flow}
123 def _get_traffic_imix(self):
125 imix = {"imix": self.scenario_cfg['options']['framesize']}
130 def _get_traffic_profile(self):
131 profile = self.scenario_cfg["traffic_profile"]
132 path = self.scenario_cfg["task_path"]
133 with utils.open_relative_file(profile, path) as infile:
136 def _get_topology(self):
137 topology = self.scenario_cfg["topology"]
138 path = self.scenario_cfg["task_path"]
139 with utils.open_relative_file(topology, path) as infile:
142 def _fill_traffic_profile(self):
143 tprofile = self._get_traffic_profile()
144 extra_args = self.scenario_cfg.get('extra_args', {})
146 'flow': self._get_traffic_flow(),
147 'imix': self._get_traffic_imix(),
148 tprofile_base.TrafficProfile.UPLINK: {},
149 tprofile_base.TrafficProfile.DOWNLINK: {},
150 'extra_args': extra_args
153 traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
154 self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
156 def _render_topology(self):
157 topology = self._get_topology()
158 topology_args = self.scenario_cfg.get('extra_args', {})
160 'extra_args': topology_args
162 topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
163 self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
165 def _find_vnf_name_from_id(self, vnf_id):
166 return next((vnfd["vnfd-id-ref"]
167 for vnfd in self.topology["constituent-vnfd"]
168 if vnf_id == vnfd["member-vnf-index"]), None)
170 def _find_vnfd_from_vnf_idx(self, vnf_id):
172 for vnfd in self.topology["constituent-vnfd"]
173 if vnf_id == vnfd["member-vnf-index"]), None)
176 def find_node_if(nodes, name, if_name, vld_id):
179 intf = nodes[name]["interfaces"][if_name]
181 # if not xe0, then maybe vld_id, uplink_0, downlink_0
182 # pop it and re-insert with the correct name from topology
183 intf = nodes[name]["interfaces"].pop(vld_id)
184 nodes[name]["interfaces"][if_name] = intf
187 def _resolve_topology(self):
188 for vld in self.topology["vld"]:
190 node0_data, node1_data = vld["vnfd-connection-point-ref"]
191 except (ValueError, TypeError):
192 raise exceptions.IncorrectConfig(
193 error_msg='Topology file corrupted, wrong endpoint count '
196 node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
197 node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
199 node0_if_name = node0_data["vnfd-connection-point-ref"]
200 node1_if_name = node1_data["vnfd-connection-point-ref"]
203 nodes = self.context_cfg["nodes"]
204 node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
205 node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])
207 # names so we can do reverse lookups
208 node0_if["ifname"] = node0_if_name
209 node1_if["ifname"] = node1_if_name
211 node0_if["node_name"] = node0_name
212 node1_if["node_name"] = node1_name
214 node0_if["vld_id"] = vld["id"]
215 node1_if["vld_id"] = vld["id"]
218 node0_if["peer_name"] = node1_name
219 node1_if["peer_name"] = node0_name
221 # set peer interface name
222 node0_if["peer_ifname"] = node1_if_name
223 node1_if["peer_ifname"] = node0_if_name
225 # just load the network
226 vld_networks = {n.get('vld_id', name): n for name, n in
227 self.context_cfg["networks"].items()}
229 node0_if["network"] = vld_networks.get(vld["id"], {})
230 node1_if["network"] = vld_networks.get(vld["id"], {})
232 node0_if["dst_mac"] = node1_if["local_mac"]
233 node0_if["dst_ip"] = node1_if["local_ip"]
235 node1_if["dst_mac"] = node0_if["local_mac"]
236 node1_if["dst_ip"] = node0_if["local_ip"]
240 raise exceptions.IncorrectConfig(
241 error_msg='Required interface not found, topology file '
244 for vld in self.topology['vld']:
246 node0_data, node1_data = vld["vnfd-connection-point-ref"]
247 except (ValueError, TypeError):
248 raise exceptions.IncorrectConfig(
249 error_msg='Topology file corrupted, wrong endpoint count '
252 node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
253 node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
255 node0_if_name = node0_data["vnfd-connection-point-ref"]
256 node1_if_name = node1_data["vnfd-connection-point-ref"]
258 nodes = self.context_cfg["nodes"]
259 node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
260 node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])
262 # add peer interface dict, but remove circular link
263 # TODO: don't waste memory
264 node0_copy = node0_if.copy()
265 node1_copy = node1_if.copy()
266 node0_if["peer_intf"] = node1_copy
267 node1_if["peer_intf"] = node0_copy
269 def _update_context_with_topology(self):
270 for vnfd in self.topology["constituent-vnfd"]:
271 vnf_idx = vnfd["member-vnf-index"]
272 vnf_name = self._find_vnf_name_from_id(vnf_idx)
273 vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
274 self.context_cfg["nodes"][vnf_name].update(vnfd)
276 def _generate_pod_yaml(self):
277 context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
278 # convert OrderedDict to a list
279 # pod.yaml nodes is a list
280 nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
283 "networks": self.context_cfg["networks"]
285 with open(context_yaml, "w") as context_out:
286 yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
290 def _serialize_node(node):
291 new_node = copy.deepcopy(node)
292 # name field is required
293 # remove context suffix
294 new_node["name"] = node['name'].split('.')[0]
296 new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
301 def map_topology_to_infrastructure(self):
302 """ This method should verify if the available resources defined in pod.yaml
303 match the topology.yaml file.
305 :return: None. Side effect: context_cfg is updated
307 # 3. Use topology file to find connections & resolve dest address
308 self._resolve_topology()
309 self._update_context_with_topology()
312 def get_vnf_impl(cls, vnf_model_id):
313 """ Find the implementing class from vnf_model["vnf"]["name"] field
315 :param vnf_model_id: parsed vnfd model ID field
316 :return: subclass of GenericVNF
318 utils.import_modules_from_package(
319 "yardstick.network_services.vnf_generic.vnf")
320 expected_name = vnf_model_id
324 for name, class_ in ((c.__name__, c) for c in
325 utils.itersubclasses(GenericVNF)):
326 if name == expected_name:
328 classes_found.append(name)
332 except StopIteration:
335 message = ('No implementation for %s found in %s'
336 % (expected_name, classes_found))
337 raise exceptions.IncorrectConfig(error_msg=message)
340 def create_interfaces_from_node(vnfd, node):
341 ext_intfs = vnfd["vdu"][0]["external-interface"] = []
342 # have to sort so xe0 goes first
343 for intf_name, intf in sorted(node['interfaces'].items()):
344 # only interfaces with vld_id are added.
345 # Thus there are two layers of filters, only intefaces with vld_id
346 # show up in interfaces, and only interfaces with traffic profiles
347 # are used by the generators
348 if intf.get('vld_id'):
349 # force dpkd_port_num to int so we can do reverse lookup
351 intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
356 "virtual-interface": intf,
357 "vnfd-connection-point-ref": intf_name,
359 ext_intfs.append(ext_intf)
361 def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
362 """ Create VNF objects based on YAML descriptors
369 trex_lib_path = get_nsb_option('trex_client_lib')
370 sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
372 if scenario_cfg is None:
373 scenario_cfg = self.scenario_cfg
375 if context_cfg is None:
376 context_cfg = self.context_cfg
379 # we assume OrderedDict for consistency in instantiation
380 for node_name, node in context_cfg["nodes"].items():
383 file_name = node["VNF model"]
385 LOG.debug("no model for %s, skipping", node_name)
387 file_path = scenario_cfg['task_path']
388 with utils.open_relative_file(file_name, file_path) as stream:
389 vnf_model = stream.read()
390 vnfd = vnfdgen.generate_vnfd(vnf_model, node)
391 # TODO: here add extra context_cfg["nodes"] regardless of template
392 vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
393 # force inject pkey if it exists
394 # we want to standardize Heat using pkey as a string so we don't rely
397 vnfd['mgmt-interface']['pkey'] = node['pkey']
400 self.create_interfaces_from_node(vnfd, node)
401 vnf_impl = self.get_vnf_impl(vnfd['id'])
402 vnf_instance = vnf_impl(node_name, vnfd)
403 vnfs.append(vnf_instance)
409 """ Setup infrastructure, provission VNFs & start traffic
413 # 1. Verify if infrastructure mapping can meet topology
414 self.map_topology_to_infrastructure()
415 # 1a. Load VNF models
416 self.load_vnf_models()
417 # 1b. Fill traffic profile with information from topology
418 self._fill_traffic_profile()
422 # link events will cause VNF application to exit
423 # so we should start traffic runners before VNFs
424 traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
425 non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
427 for vnf in chain(traffic_runners, non_traffic_runners):
428 LOG.info("Instantiating %s", vnf.name)
429 vnf.instantiate(self.scenario_cfg, self.context_cfg)
430 LOG.info("Waiting for %s to instantiate", vnf.name)
431 vnf.wait_for_instantiate()
434 for vnf in self.vnfs:
438 # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
439 self._generate_pod_yaml()
442 # Start listeners first to avoid losing packets
443 for traffic_gen in traffic_runners:
444 traffic_gen.listen_traffic(self.traffic_profile)
446 # register collector with yardstick for KPI collection.
447 self.collector = Collector(self.vnfs)
448 self.collector.start()
450 # Start the actual traffic
451 for traffic_gen in traffic_runners:
452 LOG.info("Starting traffic on %s", traffic_gen.name)
453 traffic_gen.run_traffic(self.traffic_profile)
455 def run(self, result): # yardstick API
456 """ Yardstick calls run() at intervals defined in the yaml and
457 produces timestamped samples
459 :param result: dictionary with results to update
463 # this is the only method that is check from the runner
464 # so if we have any fatal error it must be raised via these methods
465 # otherwise we will not terminate
467 result.update(self.collector.get_kpi())
470 """ Stop the collector and terminate VNF & TG instance
477 self.collector.stop()
478 for vnf in self.vnfs:
479 LOG.info("Stopping %s", vnf.name)
481 LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
485 # catch any exception in teardown and convert to simple exception
486 # never pass exceptions back to multiprocessing, because some exceptions can
488 # https://bugs.python.org/issue9400
490 raise RuntimeError("Error in teardown")
492 def pre_run_wait_time(self, time_seconds):
493 """Time waited before executing the run method"""
494 time.sleep(time_seconds)
496 def post_run_wait_time(self, time_seconds):
497 """Time waited after executing the run method"""