NSB: move interface probe to VNF, and attempt driver-only probe first
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging

import ipaddress
from itertools import chain
import os
import sys

import six
import yaml

from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.error import IncorrectConfig
from yardstick.common.constants import LOG_DIR
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh

traffic_profile.register_modules()
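# importing the traffic profile modules up front registers the available
# profile implementations so TrafficProfile.get() can resolve them by name
# later in _fill_traffic_profile()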


LOG = logging.getLogger(__name__)


class NetworkServiceTestCase(scenario_base.Scenario):
    """Class handling the generic framework for pre-deployment VNF &
       network service testing"""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')

    def _get_ip_flow_range(self, ip_start_range):
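        """Return an IP range string for use in a traffic flow.

        ``ip_start_range`` is either already a string of the form
        'x.x.x.x-y.y.y.y', or a one-entry mapping of node name to interface
        name, in which case the range is derived from that interface's subnet.
        """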

        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # in this case range_or_interface is an interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0 or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
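            # e.g. (illustrative values) an options block such as
            #   flow:
            #     src_ip: [{'tg__0': 'xe0'}]
            #     dst_ip: [{'vnf__0': 'xe0'}]
            #     count: 1
            # is flattened here into
            #   {'src_ip_0': <range>, 'dst_ip_0': <range>, 'count': 1}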
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args
        }

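        # render the traffic profile template with the values gathered above,
        # then instantiate the matching TrafficProfile implementation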
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id, uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
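        # Two passes over the VLDs: the first pass fills in interface names,
        # node names, vld_id, network and peer dst_mac/dst_ip on both ends of
        # each link; the second pass attaches a copy of the peer interface
        # (a copy, to avoid a circular reference between the two dicts).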
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):
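        # merge each constituent VNFD from the topology into the matching node
        # entry of context_cfg so later stages see a single, combined dict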
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
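        # the private key may be held as an in-memory key object; convert it
        # to a plain string so the node can be dumped to the pod.yaml file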
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """ Verify that the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
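        # the vnfd 'id' must match the class name of a GenericVNF subclass;
        # e.g. an id of 'TrexTrafficGen' (illustrative) would select the class
        # of that name found under yardstick.network_services.vnf_generic.vnf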
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        raise IncorrectConfig("No implementation for %s found in %s" %
                              (expected_name, classes_found))

    @staticmethod
    def create_interfaces_from_node(vnfd, node):
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with a vld_id are added.
            # Thus there are two layers of filters: only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration; defaults to self.scenario_cfg
        :param context_cfg: context configuration; defaults to self.context_cfg
        :return: list of instantiated GenericVNF subclasses
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
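        # put the TRex client library first on sys.path (dropping any existing
        # duplicate entry) so the bundled client is found before anything else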
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Set up infrastructure, provision VNFs & start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here, after the VNFs have probed, so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method checked by the runner, so any fatal error
        # must be raised from here, otherwise the run will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate the VNF & TG instances

        :return:
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")