Merge "Document for Euphrates test case results"
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import logging
17 import time
18
19 import ipaddress
20 from itertools import chain
21 import os
22 import sys
23
24 import six
25 import yaml
26
27 from yardstick.benchmark.scenarios import base as scenario_base
28 from yardstick.error import IncorrectConfig
29 from yardstick.common.constants import LOG_DIR
30 from yardstick.common.process import terminate_children
31 from yardstick.common import utils
32 from yardstick.network_services.collector.subscriber import Collector
33 from yardstick.network_services.vnf_generic import vnfdgen
34 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
35 from yardstick.network_services import traffic_profile
36 from yardstick.network_services.traffic_profile import base as tprofile_base
37 from yardstick.network_services.utils import get_nsb_option
38 from yardstick import ssh
39
# Register all traffic-profile implementations with the TrafficProfile
# registry at import time, so TrafficProfile.get() can resolve them before
# any scenario is instantiated.  Module-level side effect: keep before the
# class definition below.
traffic_profile.register_modules()


# Module-level logger, named after this module per yardstick convention.
LOG = logging.getLogger(__name__)
44
45
class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework to do pre-deployment VNF & network service testing.

    Renders the topology/traffic-profile templates, maps them onto the
    infrastructure described in ``context_cfg``, instantiates the VNF and
    traffic-generator objects, runs traffic and collects KPIs.
    """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # Render the topology up front so setup() can map it onto the
        # nodes described in context_cfg.
        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')

    def _get_ip_flow_range(self, ip_start_range):
        """Resolve a flow IP specification to a range string.

        :param ip_start_range: either a literal 'x.x.x.x-y.y.y.y' (or single
            IP) string, or a single-entry mapping {node_name: interface_name}
            whose interface's local_ip/netmask define the range
        :return: IP range string 'x.x.x.x-y.y.y.y' or a single IP string
        """
        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                # unknown interface: fall back to a safe dummy network
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

    def _get_traffic_flow(self):
        """Build the 'flow' section of the traffic-profile render context.

        Reads scenario options['flow'] and expands src/dst IPs, public IPs
        and ports into indexed keys (src_ip_0, dst_port_1, ...).

        :return: {"flow": {...}}; the inner dict is empty if the 'flow'
            option (or its mandatory 'count' key) is missing
        """
        flow = {}
        try:
            # TODO: should be .0  or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            # 'count' is mandatory: a KeyError here deliberately discards
            # any partially-built flow and yields an empty one
            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        """Return the optional IMIX (frame-size mix) render context.

        :return: {"imix": <framesize option>} or {} when not configured
        """
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        """Read the raw traffic-profile template, relative to the task path.

        :return: template file contents as a string
        """
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_topology(self):
        """Read the raw topology template, relative to the task path.

        :return: template file contents as a string
        """
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _fill_traffic_profile(self):
        """Render the traffic-profile template and instantiate the profile.

        Side effect: sets ``self.traffic_profile``.
        """
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args
        }

        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)

    def _render_topology(self):
        """Render the topology template and store the parsed NSD.

        Side effect: sets ``self.topology`` to the first NSD entry.
        """
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        # NOTE: fixed misspelled local name (was 'topolgy_data')
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):
        """Return the vnfd-id-ref for a member-vnf-index, or None."""
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):
        """Return the constituent-vnfd dict for a member-vnf-index, or None."""
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):
        """Locate a node interface by name, renaming from vld_id if needed.

        :param nodes: context_cfg["nodes"] mapping
        :param name: node name
        :param if_name: expected interface name from topology (e.g. xe0)
        :param vld_id: virtual-link id used as a fallback interface key
        :return: the interface dict (possibly re-keyed in place to if_name)
        """
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id,  uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
        """Cross-link every vld endpoint pair in context_cfg.

        For each virtual link, annotate both endpoint interfaces with
        names, vld id, peer/network/dst information, and finally attach a
        (non-circular) copy of the peer interface.

        :raises IncorrectConfig: on malformed endpoint counts or missing
            interface keys
        """
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        # second pass: attach peer interface copies only after every
        # interface has been fully annotated above
        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):
        """Merge each constituent VNFD into its context_cfg node entry."""
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):
        """Dump the resolved nodes/networks to LOG_DIR/pod-<task_id>.yaml."""
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):
        """Return a YAML-safe deep copy of a node dict.

        Strips the context suffix from 'name' and converts a 'pkey'
        object (if present) to its string form.
        """
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            # pkey is optional (e.g. password-based nodes)
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        :raises IncorrectConfig: when no subclass matches vnf_model_id
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            # yield matches lazily; record every candidate name so the
            # error message below can list what was actually available
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        raise IncorrectConfig("No implementation for %s found in %s" %
                              (expected_name, classes_found))

    @staticmethod
    def create_interfaces_from_node(vnfd, node):
        """Populate vnfd external-interface list from a node's interfaces.

        Only interfaces carrying a vld_id are added; their dpdk_port_num
        (if present) is coerced to int for reverse lookups.
        """
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters, only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    # non-DPDK interfaces have no port number
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario config; defaults to self.scenario_cfg
        :param context_cfg: context config; defaults to self.context_cfg
        :return: list of instantiated GenericVNF objects (also stored on
            self.vnfs). Nodes without a "VNF model" entry are skipped.
        """
        # make sure the TRex client library is first on sys.path without
        # duplicating the entry
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Setup infrastructure, provision VNFs & start traffic

        :return: None
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except BaseException:
            # BaseException (not a bare except) keeps lint happy while still
            # terminating VNFs on KeyboardInterrupt/SystemExit before re-raise
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is check from the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return: None
        :raises RuntimeError: wrapping any teardown failure
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):
        """Time waited before executing the run method"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):
        """Time waited after executing the run method"""
        pass