Merge "exec_tests: remove releng clone code"
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" NSPerf specific scenario definition """

from __future__ import absolute_import

import logging
import errno

import ipaddress
import os
import sys
import re
from itertools import chain

import six
import yaml
from collections import defaultdict

from yardstick.benchmark.scenarios import base
from yardstick.common.constants import LOG_DIR
from yardstick.common.utils import import_modules_from_package, itersubclasses
from yardstick.common.yaml_loader import yaml_load
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.traffic_profile.base import TrafficProfile
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh


LOG = logging.getLogger(__name__)


class SSHError(Exception):
    """Raised on SSH connection errors"""
    pass


class SSHTimeout(SSHError):
    """Raised when an SSH connection times out"""
    pass


class IncorrectConfig(Exception):
    """Raised when the configuration is incorrect"""
    pass


class IncorrectSetup(Exception):
    """Raised when the infrastructure setup is incorrect"""
    pass


class SshManager(object):
    def __init__(self, node, timeout=120):
        super(SshManager, self).__init__()
        self.node = node
        self.conn = None
        self.timeout = timeout

    def __enter__(self):
        """
        :return: an ssh connection ready to be used, or None if the
                 connection could not be established
        """
        try:
            self.conn = ssh.SSH.from_node(self.node)
            self.conn.wait(timeout=self.timeout)
        except SSHError as error:
            LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
        # self.conn defaults to None
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.conn:
            self.conn.close()


def find_relative_file(path, task_path):
    """
    Find a file in one of two places: at the absolute path, or
    relative to the TC scenario file, in that order.

    :param path: file path, absolute or relative
    :param task_path: directory of the TC scenario file
    :return str: full path to the file
    """
    # fixme: create schema to validate all fields have been provided
    for lookup in [os.path.abspath(path), os.path.join(task_path, path)]:
        try:
            with open(lookup):
                return lookup
        except IOError:
            pass
    raise IOError(errno.ENOENT, 'Unable to find {} file'.format(path))


def open_relative_file(path, task_path):
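    # first try the path as given (absolute or relative to the current
    # directory); on ENOENT, fall back to resolving it relative to task_path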
    try:
        return open(path)
    except IOError as e:
        if e.errno == errno.ENOENT:
            return open(os.path.join(task_path, path))
        raise


class NetworkServiceTestCase(base.Scenario):
    """Generic framework for pre-deployment VNF & network service testing"""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        with open_relative_file(scenario_cfg["topology"],
                                scenario_cfg['task_path']) as stream:
            topology_yaml = yaml_load(stream)

        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}

    def _get_ip_flow_range(self, ip_start_range):

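        # ip_start_range is either a literal range string or a single-entry
        # mapping of {node_name: interface_name_or_range}; when an interface
        # name is given, the range is derived from that interface's
        # local_ip/netmask below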
        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0  or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with open_relative_file(profile, path) as infile:
            return infile.read()

    def _fill_traffic_profile(self):
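        # render the traffic profile template with the flow, imix and
        # uplink/downlink data, then build the TrafficProfile object from
        # the resulting descriptor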
        traffic_mapping = self._get_traffic_profile()
        traffic_map_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            TrafficProfile.UPLINK: {},
            TrafficProfile.DOWNLINK: {},
        }

        traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
        self.traffic_profile = TrafficProfile.get(traffic_vnfd)
        return self.traffic_profile

    def _find_vnf_name_from_id(self, vnf_id):
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def get_vld_networks(networks):
        # network name is vld_id
        vld_map = {}
        for name, n in networks.items():
            try:
                vld_map[n['vld_id']] = n
            except KeyError:
                vld_map[name] = n
        return vld_map

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id,  uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = self.get_vld_networks(self.context_cfg["networks"])
                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _find_vnfd_from_vnf_idx(self, vnf_idx):
        return next((vnfd for vnfd in self.topology["constituent-vnfd"]
                     if vnf_idx == vnfd["member-vnf-index"]), None)

    def _update_context_with_topology(self):
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _probe_netdevs(self, node, node_dict, timeout=120):
        try:
            return self.node_netdevs[node]
        except KeyError:
            pass

        netdevs = {}
        cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"

        with SshManager(node_dict, timeout=timeout) as conn:
            if conn:
                exit_status = conn.execute(cmd)[0]
                if exit_status != 0:
                    raise IncorrectSetup("Node %s lacks the ip tool." % node)
                exit_status, stdout, _ = conn.execute(
                    self.FIND_NETDEVICE_STRING)
                if exit_status != 0:
                    raise IncorrectSetup(
                        "Cannot find netdev info in sysfs on node %s" % node)
                netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)

        self.node_netdevs[node] = netdevs
        return netdevs

    @classmethod
    def _probe_missing_values(cls, netdevs, network):

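        # find the probed netdev whose MAC matches the interface's local_mac
        # and copy its driver, PCI address and ifindex into the interface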
        mac_lower = network['local_mac'].lower()
        for netdev in netdevs.values():
            if netdev['address'].lower() != mac_lower:
                continue
            network.update({
                'driver': netdev['driver'],
                'vpci': netdev['pci_bus_id'],
                'ifindex': netdev['ifindex'],
            })

    def _generate_pod_yaml(self):
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = []
        for node in self.context_cfg["nodes"].values():
            # name field is required
            # remove context suffix
            node['name'] = node['name'].split('.')[0]
            nodes.append(node)
        nodes = self._convert_pkeys_to_string(nodes)
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _convert_pkeys_to_string(nodes):
        # make copy because we are mutating
        nodes = nodes[:]
        for i, node in enumerate(nodes):
            try:
                nodes[i] = dict(node, pkey=ssh.convert_key_to_str(node["pkey"]))
            except KeyError:
                pass
        return nodes

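    # every interface in the topology must provide these fields; any that
    # are missing are probed over SSH in map_topology_to_infrastructure()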
    TOPOLOGY_REQUIRED_KEYS = frozenset({
        "vpci", "local_ip", "netmask", "local_mac", "driver"})

    def map_topology_to_infrastructure(self):
        """ Verify that the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        num_nodes = len(self.context_cfg["nodes"])
        # OpenStack instance creation time is probably proportional to the number
        # of instances
        timeout = 120 * num_nodes
        for node, node_dict in self.context_cfg["nodes"].items():

            for network in node_dict["interfaces"].values():
                missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                if not missing:
                    continue

                # only ssh probe if there are missing values
                # ssh probe won't work on Ixia, so we had better define all our values
                try:
                    netdevs = self._probe_netdevs(node, node_dict, timeout=timeout)
                except (SSHError, SSHTimeout):
                    raise IncorrectConfig(
                        "Unable to probe missing interface fields '%s' on node %s: "
                        "SSH error" % (', '.join(missing), node))
                try:
                    self._probe_missing_values(netdevs, network)
                except KeyError:
                    pass
                else:
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
                        network)
                if missing:
                    raise IncorrectConfig(
                        "Required interface fields '%s' not found, topology file "
                        "corrupted" % ', '.join(missing))

        # we have to generate pod.yaml here so we have vpci and driver
        self._generate_pod_yaml()
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
' sh  \{\}/* \;
"""
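    # the command above emits lines of the form
    #   /sys/devices/<pci_bus_path>/net/<interface>/<attribute>:<value>
    # which BASE_ADAPTER_RE captures for parse_netdev_info()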
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    @classmethod
    def parse_netdev_info(cls, stdout):
        network_devices = defaultdict(dict)
        matches = cls.BASE_ADAPTER_RE.findall(stdout)
        for bus_path, interface_name, name, value in matches:
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # for some stupid reason VMs include virtio1/
                # in PCI device path
                bus_id = os.path.basename(dirname)
            # remove extra 'device/' from 'device/vendor,
            # device/subsystem_vendor', etc.
            if 'device/' in name:
                name = name.split('/')[1]
            network_devices[interface_name][name] = value
            network_devices[interface_name][
                'interface_name'] = interface_name
            network_devices[interface_name]['pci_bus_id'] = bus_id
        # convert back to regular dict
        return dict(network_devices)

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        raise IncorrectConfig("No implementation for %s found in %s" %
                              (expected_name, classes_found))

    @staticmethod
    def create_interfaces_from_node(vnfd, node):
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration; defaults to self.scenario_cfg
        :param context_cfg: context configuration; defaults to self.context_cfg
        :return: list of instantiated VNF objects
        """
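        # make sure the trex client library configured via NSB options is
        # imported from that location: put it at the front of sys.path and
        # drop any duplicate entries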
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Set up infrastructure, provision VNFs & start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except RuntimeError:
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instances

        :return:
        """

        self.collector.stop()
        for vnf in self.vnfs:
            LOG.info("Stopping %s", vnf.name)
            vnf.terminate()