1 # Copyright (c) 2016-2017 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ NSPerf specific scenario definition """
16 from __future__ import absolute_import
23 from operator import itemgetter
24 from collections import defaultdict
28 from yardstick.benchmark.scenarios import base
29 from yardstick.common.utils import import_modules_from_package, itersubclasses
30 from yardstick.network_services.collector.subscriber import Collector
31 from yardstick.network_services.vnf_generic import vnfdgen
32 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
33 from yardstick.network_services.traffic_profile.base import TrafficProfile
34 from yardstick import ssh
# Module-level logger shared by everything in this scenario module.
LOG = logging.getLogger(__name__)
class SSHError(Exception):
    """Raised when an SSH connection to a node cannot be established."""
class SSHTimeout(SSHError):
    """Raised when an SSH connection attempt times out."""
class IncorrectConfig(Exception):
    """Raised when the topology/pod configuration is inconsistent or invalid."""
class IncorrectSetup(Exception):
    """Raised when a node described in the config cannot be probed or set up."""
class SshManager(object):
    """Context manager wrapping an SSH connection to one pod node.

    On connect failure the error is logged and the connection attribute
    stays None, so callers must check it before use.
    """

    def __init__(self, node):
        # node: pod.yaml node dict; must contain at least "ip"
        super(SshManager, self).__init__()
        args -> network device mappings
        returns -> ssh connection ready to be used
        # NOTE(review): this catches the module-local SSHError; confirm
        # ssh.SSH.from_node actually raises this type rather than
        # yardstick.ssh.SSHError, otherwise the handler never fires.
        self.conn = ssh.SSH.from_node(self.node)
    except SSHError as error:
        LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
        # self.conn defaults to None

    def __exit__(self, exc_type, exc_val, exc_tb):
def open_relative_file(path, task_path):
    """Open *path*; if it does not exist (ENOENT), retry relative to *task_path*."""
    if e.errno == errno.ENOENT:
        return open(os.path.join(task_path, path))
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
    Network service testing """

    # scenario type string yardstick uses to select this scenario class
    __scenario_type__ = "NSPerf"
    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        """Load the topology descriptor referenced by the scenario config.

        :param scenario_cfg: scenario section of the task file; must provide
            "topology" and "task_path"
        :param context_cfg: context section (node definitions)
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        # topology path may be relative to the task file
        with open_relative_file(scenario_cfg["topology"],
                                scenario_cfg['task_path']) as stream:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags; prefer yaml.safe_load if topology files may be
            # user-supplied.
            topology_yaml = yaml.load(stream)

        # first NSD entry of the nsd:nsd-catalog is the topology used
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
        self.collector = None
        self.traffic_profile = None
    def _get_traffic_flow(cls, scenario_cfg):
        # Load the optional flow description from traffic_options["flow"];
        # a missing key or unreadable file is tolerated via the except clause.
        with open(scenario_cfg["traffic_options"]["flow"]) as fflow:
            flow = yaml.load(fflow)
    except (KeyError, IOError, OSError):
    def _get_traffic_imix(cls, scenario_cfg):
        # Load the optional imix description from traffic_options["imix"];
        # a missing key or unreadable file is tolerated via the except clause.
        with open(scenario_cfg["traffic_options"]["imix"]) as fimix:
            imix = yaml.load(fimix)
    except (KeyError, IOError, OSError):
    def _get_traffic_profile(cls, scenario_cfg, context_cfg):
        # Read the traffic-profile template text (path may be relative to
        # the task file) together with the private/public endpoint mappings.
        traffic_profile_tpl = ""
        with open_relative_file(scenario_cfg["traffic_profile"],
                                scenario_cfg["task_path"]) as infile:
            traffic_profile_tpl = infile.read()
    except (KeyError, IOError, OSError):
        # [template text, private mapping, public mapping]
        return [traffic_profile_tpl, private, public]
    def _fill_traffic_profile(self, scenario_cfg, context_cfg):
        # Render the traffic-profile template with flow and imix data and
        # return the instantiated TrafficProfile object.
        flow = self._get_traffic_flow(scenario_cfg)
        imix = self._get_traffic_imix(scenario_cfg)
        traffic_mapping, private, public = \
            self._get_traffic_profile(scenario_cfg, context_cfg)
        # vnfdgen substitutes the imix/flow values into the template
        traffic_profile = vnfdgen.generate_vnfd(traffic_mapping,
                                                {"imix": imix, "flow": flow,
        return TrafficProfile.get(traffic_profile)
162 def _find_vnf_name_from_id(cls, topology, vnf_id):
163 return next((vnfd["vnfd-id-ref"]
164 for vnfd in topology["constituent-vnfd"]
165 if vnf_id == vnfd["member-vnf-index"]), None)
    def _resolve_topology(self, context_cfg, topology):
        """For every VLD in the topology, record its vld_id on both endpoint
        interfaces and cross-fill each side's dst_mac/dst_ip from the peer's
        local_mac/local_ip in context_cfg["nodes"]."""
        for vld in topology["vld"]:
            # a virtual link must join exactly two connection points
            if len(vld["vnfd-connection-point-ref"]) > 2:
                raise IncorrectConfig("Topology file corrupted, "
                                      "too many endpoint for connection")

            node_0, node_1 = vld["vnfd-connection-point-ref"]

            # map member-vnf-index references back to node names
            node0 = self._find_vnf_name_from_id(topology,
                                                node_0["member-vnf-index-ref"])
            node1 = self._find_vnf_name_from_id(topology,
                                                node_1["member-vnf-index-ref"])

            # interface names on each endpoint
            if0 = node_0["vnfd-connection-point-ref"]
            if1 = node_1["vnfd-connection-point-ref"]

            nodes = context_cfg["nodes"]
            nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
            nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]

            # each side's destination is the other side's local address
            nodes[node0]["interfaces"][if0]["dst_mac"] = \
                nodes[node1]["interfaces"][if1]["local_mac"]
            nodes[node0]["interfaces"][if0]["dst_ip"] = \
                nodes[node1]["interfaces"][if1]["local_ip"]

            nodes[node1]["interfaces"][if1]["dst_mac"] = \
                nodes[node0]["interfaces"][if0]["local_mac"]
            nodes[node1]["interfaces"][if1]["dst_ip"] = \
                nodes[node0]["interfaces"][if0]["local_ip"]
        # raised when an expected node/interface key lookup above fails,
        # i.e. the topology does not match the pod definition
        raise IncorrectConfig("Required interface not found,"
                              "topology file corrupted")
202 def _find_list_index_from_vnf_idx(cls, topology, vnf_idx):
203 return next((topology["constituent-vnfd"].index(vnfd)
204 for vnfd in topology["constituent-vnfd"]
205 if vnf_idx == vnfd["member-vnf-index"]), None)
207 def _update_context_with_topology(self, context_cfg, topology):
208 for idx in topology["constituent-vnfd"]:
209 vnf_idx = idx["member-vnf-index"]
210 nodes = context_cfg["nodes"]
211 node = self._find_vnf_name_from_id(topology, vnf_idx)
212 list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
213 nodes[node].update(topology["constituent-vnfd"][list_idx])
216 def _sort_dpdk_port_num(netdevs):
217 # dpdk_port_num is PCI BUS ID ordering, lowest first
218 s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
219 for dpdk_port_num, netdev in enumerate(s, 1):
220 netdev['dpdk_port_num'] = dpdk_port_num
223 def _probe_missing_values(cls, netdevs, network, missing):
224 mac = network['local_mac']
225 for netdev in netdevs.values():
226 if netdev['address'].lower() == mac.lower():
227 network['driver'] = netdev['driver']
228 network['vpci'] = netdev['pci_bus_id']
229 network['dpdk_port_num'] = netdev['dpdk_port_num']
230 network['ifindex'] = netdev['ifindex']
    # Interface fields that must be present (from pod.yaml or probed from the
    # node) before the topology can be mapped onto the infrastructure.
    TOPOLOGY_REQUIRED_KEYS = frozenset({
        "vpci", "local_ip", "netmask", "local_mac", "driver", "dpdk_port_num"})
    def map_topology_to_infrastructure(self, context_cfg, topology):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # probe every node over SSH and enrich its interface definitions
        for node, node_dict in context_cfg["nodes"].items():

            cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
            with SshManager(node_dict) as conn:
                # verify the node has the "ip" tool before relying on it
                exit_status = conn.execute(cmd)[0]
                raise IncorrectSetup("Node's %s lacks ip tool." % node)
                # dump per-netdev sysfs attributes (address, driver, PCI ids)
                exit_status, stdout, _ = conn.execute(
                    self.FIND_NETDEVICE_STRING)
                # NOTE(review): the format string below has no %s, so
                # applying "% node" raises TypeError instead of producing
                # a message; should read "... in sysfs on node %s" % node.
                raise IncorrectSetup(
                    "Cannot find netdev info in sysfs" % node)
                netdevs = node_dict['netdevs'] = self.parse_netdev_info(
                # assign DPDK port numbers in PCI bus-id order
                self._sort_dpdk_port_num(netdevs)

                # fill in interface fields pod.yaml left out by matching on
                # the interface's local MAC address, then re-check coverage
                for network in node_dict["interfaces"].values():
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                        self._probe_missing_values(netdevs, network,
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
                        raise IncorrectConfig(
                            "Require interface fields '%s' "
                            "not found, topology file "
                            "corrupted" % ', '.join(missing))

        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology(context_cfg, topology)
        self._update_context_with_topology(context_cfg, topology)
    # Shell one-liner executed on each node: for every netdev under a PCI
    # device it greps ifindex/address/operstate plus PCI vendor/device IDs,
    # and prints the bound kernel driver name; output is parsed by
    # BASE_ADAPTER_RE in parse_netdev_info().
    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
    # Matches one grep -H style line of FIND_NETDEVICE_STRING output:
    # groups = (pci-bus-path, interface-name, attribute-name, value)
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
290 def parse_netdev_info(cls, stdout):
291 network_devices = defaultdict(dict)
292 matches = cls.BASE_ADAPTER_RE.findall(stdout)
293 for bus_path, interface_name, name, value in matches:
294 dirname, bus_id = os.path.split(bus_path)
295 if 'virtio' in bus_id:
296 # for some stupid reason VMs include virtio1/
298 bus_id = os.path.basename(dirname)
299 # remove extra 'device/' from 'device/vendor,
300 # device/subsystem_vendor', etc.
301 if 'device/' in name:
302 name = name.split('/')[1]
303 network_devices[interface_name][name] = value
304 network_devices[interface_name][
305 'interface_name'] = interface_name
306 network_devices[interface_name]['pci_bus_id'] = bus_id
307 # convert back to regular dict
308 return dict(network_devices)
    def get_vnf_impl(cls, vnf_model):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model: dictionary containing a parsed vnfd
        :return: subclass of GenericVNF
        """
        # make sure every VNF plug-in module is imported before scanning
        # GenericVNF subclasses
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model['id']
        # first GenericVNF subclass whose class name matches the vnfd id
        impl = (c for c in itersubclasses(GenericVNF)
                if c.__name__ == expected_name)
    except StopIteration:
        # NOTE(review): the %s here is never interpolated — expected_name is
        # passed as a second exception argument, not formatted into the text.
        raise IncorrectConfig("No implementation for %s", expected_name)
    def load_vnf_models(self, scenario_cfg, context_cfg):
        """ Create VNF objects based on YAML descriptors
        """
        for node_name, node in context_cfg["nodes"].items():
            # each node references a "VNF model" file, possibly relative to
            # the task file
            with open_relative_file(node["VNF model"],
                                    scenario_cfg['task_path']) as stream:
                vnf_model = stream.read()
            # render the VNFD template with this node's values
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            # tag the instance with its node name for logging/KPI lookup
            vnf_instance.name = node_name
            vnfs.append(vnf_instance)
        """ Setup infrastructure, provision VNFs & start traffic
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure(self.context_cfg, self.topology)
        # 1a. Load VNF models
        self.vnfs = self.load_vnf_models(self.scenario_cfg, self.context_cfg)
        # 1b. Fill traffic profile with information from topology
        self.traffic_profile = self._fill_traffic_profile(self.scenario_cfg,
        # 2. Provision VNFs on the mapped infrastructure
        for vnf in self.vnfs:
            LOG.info("Instantiating %s", vnf.name)
            vnf.instantiate(self.scenario_cfg, self.context_cfg)
        for vnf in self.vnfs:

        # Start listeners first to avoid losing packets
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
        produces timestamped samples

        :param result: dictionary with results to update
        """
        # fold each VNF's KPIs into the shared result dict, e.g.
        for vnf in self.vnfs:
            # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
            result.update(self.collector.get_kpi(vnf))
        """ Stop the collector and terminate VNF & TG instance
        """
        # stop KPI collection before shutting the VNFs down
        self.collector.stop()
        for vnf in self.vnfs:
            LOG.info("Stopping %s", vnf.name)