Merge "Generate pod.yaml from current context"
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ NSPerf specific scenario definition """
15
16 from __future__ import absolute_import
17
18 import logging
19 import errno
20
21 import ipaddress
22 import os
23 import sys
24 import re
25 from itertools import chain
26
27 import six
28 import yaml
29 from collections import defaultdict
30
31 from yardstick.benchmark.scenarios import base
32 from yardstick.common.constants import LOG_DIR
33 from yardstick.common.utils import import_modules_from_package, itersubclasses
34 from yardstick.common.yaml_loader import yaml_load
35 from yardstick.network_services.collector.subscriber import Collector
36 from yardstick.network_services.vnf_generic import vnfdgen
37 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
38 from yardstick.network_services.traffic_profile.base import TrafficProfile
39 from yardstick.network_services.utils import get_nsb_option
40 from yardstick import ssh
41
42
43 LOG = logging.getLogger(__name__)
44
45
class SSHError(Exception):
    """Raised for ssh connection failures during scenario setup."""
49
50
class SSHTimeout(SSHError):
    """Raised when the ssh connection times out."""
54
55
class IncorrectConfig(Exception):
    """Raised when configuration is found to be incorrect during setup."""
59
60
class IncorrectSetup(Exception):
    """Raised when the environment setup is found to be incorrect."""
64
65
class SshManager(object):
    """Context manager yielding a ready ssh connection to a pod node.

    On connection failure the error is logged and ``None`` is yielded
    instead of a connection, so callers must check the result.
    """

    def __init__(self, node, timeout=120):
        """
        :param node: pod node dict with ssh credentials (must contain "ip")
        :param timeout: seconds to wait for the connection to come up
        """
        super(SshManager, self).__init__()
        self.node = node
        self.conn = None
        self.timeout = timeout

    def __enter__(self):
        """
        args -> network device mappings
        returns -> ssh connection ready to be used
        """
        try:
            self.conn = ssh.SSH.from_node(self.node)
            self.conn.wait(timeout=self.timeout)
        except ssh.SSHError as error:
            # bug fix: ssh.SSH raises the ssh module's SSHError/SSHTimeout,
            # not this module's local SSHError class, so catching the local
            # class let connection failures propagate instead of yielding None
            LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
        # self.conn defaults to None
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close only if a connection was actually established
        if self.conn:
            self.conn.close()
89
90
def find_relative_file(path, task_path):
    """Return the first readable candidate for *path*.

    Candidates are tried in order: the absolute form of *path*, then
    *path* relative to the TC scenario file directory *task_path*.

    :param path: file name or path from the test case
    :param task_path: directory containing the scenario file
    :return str: full path to an existing, readable file
    :raises IOError: with ENOENT when neither candidate can be opened
    """
    # fixme: create schema to validate all fields have been provided
    candidates = (os.path.abspath(path), os.path.join(task_path, path))
    for candidate in candidates:
        try:
            with open(candidate):
                pass
        except IOError:
            continue
        return candidate
    raise IOError(errno.ENOENT, 'Unable to find {} file'.format(path))
108
109
def open_relative_file(path, task_path):
    """Open *path*; if it does not exist, open it relative to *task_path*.

    Any IOError other than ENOENT from the first attempt is re-raised.
    """
    try:
        return open(path)
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        return open(os.path.join(task_path, path))
117
118
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    # scenario type name used in test-case YAML to select this scenario
    __scenario_type__ = "NSPerf"
124
125     def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
126         super(NetworkServiceTestCase, self).__init__()
127         self.scenario_cfg = scenario_cfg
128         self.context_cfg = context_cfg
129
130         # fixme: create schema to validate all fields have been provided
131         with open_relative_file(scenario_cfg["topology"],
132                                 scenario_cfg['task_path']) as stream:
133             topology_yaml = yaml_load(stream)
134
135         self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
136         self.vnfs = []
137         self.collector = None
138         self.traffic_profile = None
139         self.node_netdevs = {}
140
141     def _get_ip_flow_range(self, ip_start_range):
142
143         node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
144         if node_name is not None:
145             node = self.context_cfg["nodes"].get(node_name, {})
146             try:
147                 # the ip_range is the interface name
148                 interface = node.get("interfaces", {})[range_or_interface]
149             except KeyError:
150                 ip = "0.0.0.0"
151                 mask = "255.255.255.0"
152             else:
153                 ip = interface["local_ip"]
154                 # we can't default these values, they must both exist to be valid
155                 mask = interface["netmask"]
156
157             ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
158             hosts = list(ipaddr.hosts())
159             if len(hosts) > 2:
160                 # skip the first host in case of gateway
161                 ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
162             else:
163                 LOG.warning("Only single IP in range %s", ipaddr)
164                 # fall back to single IP range
165                 ip_addr_range = ip
166         else:
167             # we are manually specifying the range
168             ip_addr_range = range_or_interface
169         return ip_addr_range
170
171     def _get_traffic_flow(self):
172         flow = {}
173         try:
174             # TODO: should be .0  or .1 so we can use list
175             # but this also roughly matches uplink_0, downlink_0
176             fflow = self.scenario_cfg["options"]["flow"]
177             for index, src in enumerate(fflow.get("src_ip", [])):
178                 flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)
179
180             for index, dst in enumerate(fflow.get("dst_ip", [])):
181                 flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)
182
183             for index, publicip in enumerate(fflow.get("public_ip", [])):
184                 flow["public_ip_{}".format(index)] = publicip
185
186             flow["count"] = fflow["count"]
187         except KeyError:
188             flow = {}
189         return {"flow": flow}
190
191     def _get_traffic_imix(self):
192         try:
193             imix = {"imix": self.scenario_cfg['options']['framesize']}
194         except KeyError:
195             imix = {}
196         return imix
197
198     def _get_traffic_profile(self):
199         profile = self.scenario_cfg["traffic_profile"]
200         path = self.scenario_cfg["task_path"]
201         with open_relative_file(profile, path) as infile:
202             return infile.read()
203
204     def _fill_traffic_profile(self):
205         traffic_mapping = self._get_traffic_profile()
206         traffic_map_data = {
207             'flow': self._get_traffic_flow(),
208             'imix': self._get_traffic_imix(),
209             TrafficProfile.UPLINK: {},
210             TrafficProfile.DOWNLINK: {},
211         }
212
213         traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
214         self.traffic_profile = TrafficProfile.get(traffic_vnfd)
215         return self.traffic_profile
216
217     def _find_vnf_name_from_id(self, vnf_id):
218         return next((vnfd["vnfd-id-ref"]
219                      for vnfd in self.topology["constituent-vnfd"]
220                      if vnf_id == vnfd["member-vnf-index"]), None)
221
222     @staticmethod
223     def get_vld_networks(networks):
224         # network name is vld_id
225         vld_map = {}
226         for name, n in networks.items():
227             try:
228                 vld_map[n['vld_id']] = n
229             except KeyError:
230                 vld_map[name] = n
231         return vld_map
232
233     @staticmethod
234     def find_node_if(nodes, name, if_name, vld_id):
235         try:
236             # check for xe0, xe1
237             intf = nodes[name]["interfaces"][if_name]
238         except KeyError:
239             # if not xe0, then maybe vld_id,  uplink_0, downlink_0
240             # pop it and re-insert with the correct name from topology
241             intf = nodes[name]["interfaces"].pop(vld_id)
242             nodes[name]["interfaces"][if_name] = intf
243         return intf
244
    def _resolve_topology(self):
        """Wire peer/interface metadata onto context nodes from the topology.

        Runs two passes over topology["vld"]: the first fills each endpoint
        interface with names, vld_id, network, and peer MAC/IP; the second
        runs only after ALL interfaces are complete, so the peer_intf copies
        taken there contain the data written in the first pass.

        :raises IncorrectConfig: corrupt topology or missing interface fields
        """
        for vld in self.topology["vld"]:
            try:
                # every link must connect exactly two endpoints
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = self.get_vld_networks(self.context_cfg["networks"])
                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                # destination MAC/IP come from the opposite endpoint
                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        # second pass: only now are all interfaces fully populated, so the
        # copies attached as peer_intf include the first-pass fields
        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy
321
322     def _find_vnfd_from_vnf_idx(self, vnf_idx):
323         return next((vnfd for vnfd in self.topology["constituent-vnfd"]
324                      if vnf_idx == vnfd["member-vnf-index"]), None)
325
326     def _update_context_with_topology(self):
327         for vnfd in self.topology["constituent-vnfd"]:
328             vnf_idx = vnfd["member-vnf-index"]
329             vnf_name = self._find_vnf_name_from_id(vnf_idx)
330             vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
331             self.context_cfg["nodes"][vnf_name].update(vnfd)
332
333     def _probe_netdevs(self, node, node_dict, timeout=120):
334         try:
335             return self.node_netdevs[node]
336         except KeyError:
337             pass
338
339         netdevs = {}
340         cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
341
342         with SshManager(node_dict, timeout=timeout) as conn:
343             if conn:
344                 exit_status = conn.execute(cmd)[0]
345                 if exit_status != 0:
346                     raise IncorrectSetup("Node's %s lacks ip tool." % node)
347                 exit_status, stdout, _ = conn.execute(
348                     self.FIND_NETDEVICE_STRING)
349                 if exit_status != 0:
350                     raise IncorrectSetup(
351                         "Cannot find netdev info in sysfs" % node)
352                 netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
353
354         self.node_netdevs[node] = netdevs
355         return netdevs
356
357     @classmethod
358     def _probe_missing_values(cls, netdevs, network):
359
360         mac_lower = network['local_mac'].lower()
361         for netdev in netdevs.values():
362             if netdev['address'].lower() != mac_lower:
363                 continue
364             network.update({
365                 'driver': netdev['driver'],
366                 'vpci': netdev['pci_bus_id'],
367                 'ifindex': netdev['ifindex'],
368             })
369
370     def _generate_pod_yaml(self):
371         context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
372         # convert OrderedDict to a list
373         # pod.yaml nodes is a list
374         nodes = []
375         for node in self.context_cfg["nodes"].values():
376             # name field is required
377             # remove context suffix
378             node['name'] = node['name'].split('.')[0]
379             nodes.append(node)
380         nodes = self._convert_pkeys_to_string(nodes)
381         pod_dict = {
382             "nodes": nodes,
383             "networks": self.context_cfg["networks"]
384         }
385         with open(context_yaml, "w") as context_out:
386             yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
387                            explicit_start=True)
388
389     @staticmethod
390     def _convert_pkeys_to_string(nodes):
391         # make copy because we are mutating
392         nodes = nodes[:]
393         for i, node in enumerate(nodes):
394             try:
395                 nodes[i] = dict(node, pkey=ssh.convert_key_to_str(node["pkey"]))
396             except KeyError:
397                 pass
398         return nodes
399
    # interface fields that must be present (from pod.yaml or ssh probing)
    # before the topology can be mapped onto the infrastructure
    TOPOLOGY_REQUIRED_KEYS = frozenset({
        "vpci", "local_ip", "netmask", "local_mac", "driver"})
402
    def map_topology_to_infrastructure(self):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        Interfaces missing any TOPOLOGY_REQUIRED_KEYS field are probed over
        ssh; fields that are still missing afterwards are a fatal error.
        Finally writes pod.yaml and resolves the topology into the context.

        :return: None. Side effect: context_cfg is updated
        :raises IncorrectConfig: probe failure or fields still missing
        """
        num_nodes = len(self.context_cfg["nodes"])
        # OpenStack instance creation time is probably proportional to the number
        # of instances
        timeout = 120 * num_nodes
        for node, node_dict in self.context_cfg["nodes"].items():

            for network in node_dict["interfaces"].values():
                missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                if not missing:
                    continue

                # only ssh probe if there are missing values
                # ssh probe won't work on Ixia, so we had better define all our values
                try:
                    netdevs = self._probe_netdevs(node, node_dict, timeout=timeout)
                except (SSHError, SSHTimeout):
                    # NOTE(review): these are the module-local exception
                    # classes; the ssh module raises its own SSHError/SSHTimeout
                    # which would NOT be caught here -- confirm which is intended
                    raise IncorrectConfig(
                        "Unable to probe missing interface fields '%s', on node %s "
                        "SSH Error" % (', '.join(missing), node))
                try:
                    self._probe_missing_values(netdevs, network)
                except KeyError:
                    # probed netdev lacked a field; keep the stale 'missing'
                    # set so the error below fires
                    pass
                else:
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
                        network)
                if missing:
                    raise IncorrectConfig(
                        "Require interface fields '%s' not found, topology file "
                        "corrupted" % ', '.join(missing))

        # we have to generate pod.yaml here so we have vpci and driver
        self._generate_pod_yaml()
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()
445
    # shell one-liner executed on each node: dumps ifindex/address/operstate,
    # PCI vendor/device ids and the driver name for every netdev under
    # /sys/devices, one "path:value" line per attribute
    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
' sh  \{\}/* \;
"""
    # splits one FIND_NETDEVICE_STRING output line into
    # (pci bus path, interface name, attribute name, value)
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
454
455     @classmethod
456     def parse_netdev_info(cls, stdout):
457         network_devices = defaultdict(dict)
458         matches = cls.BASE_ADAPTER_RE.findall(stdout)
459         for bus_path, interface_name, name, value in matches:
460             dirname, bus_id = os.path.split(bus_path)
461             if 'virtio' in bus_id:
462                 # for some stupid reason VMs include virtio1/
463                 # in PCI device path
464                 bus_id = os.path.basename(dirname)
465             # remove extra 'device/' from 'device/vendor,
466             # device/subsystem_vendor', etc.
467             if 'device/' in name:
468                 name = name.split('/')[1]
469             network_devices[interface_name][name] = value
470             network_devices[interface_name][
471                 'interface_name'] = interface_name
472             network_devices[interface_name]['pci_bus_id'] = bus_id
473         # convert back to regular dict
474         return dict(network_devices)
475
476     @classmethod
477     def get_vnf_impl(cls, vnf_model_id):
478         """ Find the implementing class from vnf_model["vnf"]["name"] field
479
480         :param vnf_model_id: parsed vnfd model ID field
481         :return: subclass of GenericVNF
482         """
483         import_modules_from_package(
484             "yardstick.network_services.vnf_generic.vnf")
485         expected_name = vnf_model_id
486         classes_found = []
487
488         def impl():
489             for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
490                 if name == expected_name:
491                     yield class_
492                 classes_found.append(name)
493
494         try:
495             return next(impl())
496         except StopIteration:
497             pass
498
499         raise IncorrectConfig("No implementation for %s found in %s" %
500                               (expected_name, classes_found))
501
502     @staticmethod
503     def create_interfaces_from_node(vnfd, node):
504         ext_intfs = vnfd["vdu"][0]["external-interface"] = []
505         # have to sort so xe0 goes first
506         for intf_name, intf in sorted(node['interfaces'].items()):
507             # only interfaces with vld_id are added.
508             # Thus there are two layers of filters, only intefaces with vld_id
509             # show up in interfaces, and only interfaces with traffic profiles
510             # are used by the generators
511             if intf.get('vld_id'):
512                 # force dpkd_port_num to int so we can do reverse lookup
513                 try:
514                     intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
515                 except KeyError:
516                     pass
517                 ext_intf = {
518                     "name": intf_name,
519                     "virtual-interface": intf,
520                     "vnfd-connection-point-ref": intf_name,
521                 }
522                 ext_intfs.append(ext_intf)
523
524     def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
525         """ Create VNF objects based on YAML descriptors
526
527         :param scenario_cfg:
528         :type scenario_cfg:
529         :param context_cfg:
530         :return:
531         """
532         trex_lib_path = get_nsb_option('trex_client_lib')
533         sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
534
535         if scenario_cfg is None:
536             scenario_cfg = self.scenario_cfg
537
538         if context_cfg is None:
539             context_cfg = self.context_cfg
540
541         vnfs = []
542         # we assume OrderedDict for consistenct in instantiation
543         for node_name, node in context_cfg["nodes"].items():
544             LOG.debug(node)
545             file_name = node["VNF model"]
546             file_path = scenario_cfg['task_path']
547             with open_relative_file(file_name, file_path) as stream:
548                 vnf_model = stream.read()
549             vnfd = vnfdgen.generate_vnfd(vnf_model, node)
550             # TODO: here add extra context_cfg["nodes"] regardless of template
551             vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
552             # force inject pkey if it exists
553             # we want to standardize Heat using pkey as a string so we don't rely
554             # on the filesystem
555             try:
556                 vnfd['mgmt-interface']['pkey'] = node['pkey']
557             except KeyError:
558                 pass
559             self.create_interfaces_from_node(vnfd, node)
560             vnf_impl = self.get_vnf_impl(vnfd['id'])
561             vnf_instance = vnf_impl(node_name, vnfd)
562             vnfs.append(vnf_instance)
563
564         self.vnfs = vnfs
565         return vnfs
566
567     def setup(self):
568         """ Setup infrastructure, provission VNFs & start traffic
569
570         :return:
571         """
572         # 1. Verify if infrastructure mapping can meet topology
573         self.map_topology_to_infrastructure()
574         # 1a. Load VNF models
575         self.load_vnf_models()
576         # 1b. Fill traffic profile with information from topology
577         self._fill_traffic_profile()
578
579         # 2. Provision VNFs
580
581         # link events will cause VNF application to exit
582         # so we should start traffic runners before VNFs
583         traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
584         non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
585         try:
586             for vnf in chain(traffic_runners, non_traffic_runners):
587                 LOG.info("Instantiating %s", vnf.name)
588                 vnf.instantiate(self.scenario_cfg, self.context_cfg)
589                 LOG.info("Waiting for %s to instantiate", vnf.name)
590                 vnf.wait_for_instantiate()
591         except RuntimeError:
592             for vnf in self.vnfs:
593                 vnf.terminate()
594             raise
595
596         # 3. Run experiment
597         # Start listeners first to avoid losing packets
598         for traffic_gen in traffic_runners:
599             traffic_gen.listen_traffic(self.traffic_profile)
600
601         # register collector with yardstick for KPI collection.
602         self.collector = Collector(self.vnfs, self.traffic_profile)
603         self.collector.start()
604
605         # Start the actual traffic
606         for traffic_gen in traffic_runners:
607             LOG.info("Starting traffic on %s", traffic_gen.name)
608             traffic_gen.run_traffic(self.traffic_profile)
609
610     def run(self, result):  # yardstick API
611         """ Yardstick calls run() at intervals defined in the yaml and
612             produces timestamped samples
613
614         :param result: dictionary with results to update
615         :return: None
616         """
617
618         for vnf in self.vnfs:
619             # Result example:
620             # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
621             LOG.debug("collect KPI for %s", vnf.name)
622             result.update(self.collector.get_kpi(vnf))
623
624     def teardown(self):
625         """ Stop the collector and terminate VNF & TG instance
626
627         :return
628         """
629
630         self.collector.stop()
631         for vnf in self.vnfs:
632             LOG.info("Stopping %s", vnf.name)
633             vnf.terminate()