Merge "Adding Test Cases for Prox PktTouch Standalone OvS-DPDK"
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import ipaddress
17 from itertools import chain
18 import logging
19 import os
20 import sys
21 import time
22
23 import six
24 import yaml
25
26 from yardstick.benchmark.contexts import base as context_base
27 from yardstick.benchmark.scenarios import base as scenario_base
28 from yardstick.common.constants import LOG_DIR
29 from yardstick.common import exceptions
30 from yardstick.common.process import terminate_children
31 from yardstick.common import utils
32 from yardstick.network_services.collector.subscriber import Collector
33 from yardstick.network_services.vnf_generic import vnfdgen
34 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
35 from yardstick.network_services import traffic_profile
36 from yardstick.network_services.traffic_profile import base as tprofile_base
37 from yardstick.network_services.utils import get_nsb_option
38 from yardstick import ssh
39
40
41 traffic_profile.register_modules()
42
43
44 LOG = logging.getLogger(__name__)
45
46
class NetworkServiceTestCase(scenario_base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        """Initialize the scenario from task and context configuration.

        :param scenario_cfg: scenario section of the task file (options,
            topology, traffic_profile, task_path, task_id, extra_args, ...)
        :param context_cfg: deployed context description (nodes, networks)
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # Render the topology template eagerly; self.topology must exist
        # before map_topology_to_infrastructure() is called in setup().
        self._render_topology()
        self.vnfs = []               # GenericVNF instances, filled by load_vnf_models()
        self.collector = None        # KPI Collector, created in setup()
        self.traffic_profile = None  # set by _fill_traffic_profile()
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        self._mq_ids = []            # MQ producer IDs of the traffic generators
65
66     def is_ended(self):
67         return self.traffic_profile is not None and self.traffic_profile.is_ended()
68
69     def _get_ip_flow_range(self, ip_start_range):
70         """Retrieve a CIDR first and last viable IPs
71
72         :param ip_start_range: could be the IP range itself or a dictionary
73                with the host name and the port.
74         :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
75         """
76         if isinstance(ip_start_range, six.string_types):
77             return ip_start_range
78
79         node_name, range_or_interface = next(iter(ip_start_range.items()),
80                                              (None, '0.0.0.0'))
81         if node_name is None:
82             return range_or_interface
83
84         node = self.context_cfg['nodes'].get(node_name, {})
85         interface = node.get('interfaces', {}).get(range_or_interface)
86         if interface:
87             ip = interface['local_ip']
88             mask = interface['netmask']
89         else:
90             ip = '0.0.0.0'
91             mask = '255.255.255.0'
92
93         ipaddr = ipaddress.ip_network(
94             six.text_type('{}/{}'.format(ip, mask)), strict=False)
95         if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
96             ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
97         else:
98             LOG.warning('Only single IP in range %s', ipaddr)
99             ip_addr_range = ip
100         return ip_addr_range
101
102     def _get_traffic_flow(self):
103         flow = {}
104         try:
105             # TODO: should be .0  or .1 so we can use list
106             # but this also roughly matches uplink_0, downlink_0
107             fflow = self.scenario_cfg["options"]["flow"]
108             for index, src in enumerate(fflow.get("src_ip", [])):
109                 flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)
110
111             for index, dst in enumerate(fflow.get("dst_ip", [])):
112                 flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)
113
114             for index, publicip in enumerate(fflow.get("public_ip", [])):
115                 flow["public_ip_{}".format(index)] = publicip
116
117             for index, src_port in enumerate(fflow.get("src_port", [])):
118                 flow["src_port_{}".format(index)] = src_port
119
120             for index, dst_port in enumerate(fflow.get("dst_port", [])):
121                 flow["dst_port_{}".format(index)] = dst_port
122
123             if "count" in fflow:
124                 flow["count"] = fflow["count"]
125
126             if "srcseed" in fflow:
127                 flow["srcseed"] = fflow["srcseed"]
128
129             if "dstseed" in fflow:
130                 flow["dstseed"] = fflow["dstseed"]
131
132         except KeyError:
133             flow = {}
134         return {"flow": flow}
135
136     def _get_traffic_imix(self):
137         try:
138             imix = {"imix": self.scenario_cfg['options']['framesize']}
139         except KeyError:
140             imix = {}
141         return imix
142
143     def _get_traffic_profile(self):
144         profile = self.scenario_cfg["traffic_profile"]
145         path = self.scenario_cfg["task_path"]
146         with utils.open_relative_file(profile, path) as infile:
147             return infile.read()
148
149     def _get_duration(self):
150         options = self.scenario_cfg.get('options', {})
151         return options.get('duration',
152                            tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
153
154     def _fill_traffic_profile(self):
155         tprofile = self._get_traffic_profile()
156         extra_args = self.scenario_cfg.get('extra_args', {})
157         tprofile_data = {
158             'flow': self._get_traffic_flow(),
159             'imix': self._get_traffic_imix(),
160             tprofile_base.TrafficProfile.UPLINK: {},
161             tprofile_base.TrafficProfile.DOWNLINK: {},
162             'extra_args': extra_args,
163             'duration': self._get_duration()}
164
165         traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
166
167         traffic_config = \
168             self.scenario_cfg.get("options", {}).get("traffic_config", {})
169
170         traffic_vnfd.setdefault("traffic_profile", {})
171         traffic_vnfd["traffic_profile"].update(traffic_config)
172
173         self.traffic_profile = \
174             tprofile_base.TrafficProfile.get(traffic_vnfd)
175
176     def _get_topology(self):
177         topology = self.scenario_cfg["topology"]
178         path = self.scenario_cfg["task_path"]
179         with utils.open_relative_file(topology, path) as infile:
180             return infile.read()
181
182     def _render_topology(self):
183         topology = self._get_topology()
184         topology_args = self.scenario_cfg.get('extra_args', {})
185         topolgy_data = {
186             'extra_args': topology_args
187         }
188         topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
189         self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
190
191     def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
192         return next((vnfd["vnfd-id-ref"]
193                      for vnfd in self.topology["constituent-vnfd"]
194                      if vnf_id == vnfd["member-vnf-index"]), None)
195
196     def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
197         return next((vnfd
198                      for vnfd in self.topology["constituent-vnfd"]
199                      if vnf_id == vnfd["member-vnf-index"]), None)
200
201     @staticmethod
202     def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
203         try:
204             # check for xe0, xe1
205             intf = nodes[name]["interfaces"][if_name]
206         except KeyError:
207             # if not xe0, then maybe vld_id,  uplink_0, downlink_0
208             # pop it and re-insert with the correct name from topology
209             intf = nodes[name]["interfaces"].pop(vld_id)
210             nodes[name]["interfaces"][if_name] = intf
211         return intf
212
    def _resolve_topology(self):
        """Resolve the topology VLDs against the context nodes.

        First pass: for every virtual link, locate both endpoint interface
        dicts in ``context_cfg["nodes"]`` and annotate them in place with
        their own name, node, vld id, peer name/interface, attached network
        and destination mac/ip (taken from the peer's local mac/ip).
        Second pass: attach a shallow copy of each peer's interface dict
        under ``peer_intf`` — copied so the two dicts do not reference each
        other circularly.

        :raises exceptions.IncorrectConfig: when a VLD does not have
            exactly two endpoints or a referenced interface/field is
            missing from the context.
        """
        for vld in self.topology["vld"]:
            try:
                # every virtual link must connect exactly two endpoints
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                # networks are indexed by their own vld_id when set,
                # otherwise by the network name itself
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                # each side's destination is the peer's local address
                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        # second pass: peer_intf copies must be taken only after the first
        # pass has fully annotated both sides of every link
        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy
294
295     def _update_context_with_topology(self):  # pragma: no cover
296         for vnfd in self.topology["constituent-vnfd"]:
297             vnf_idx = vnfd["member-vnf-index"]
298             vnf_name = self._find_vnf_name_from_id(vnf_idx)
299             vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
300             self.context_cfg["nodes"][vnf_name].update(vnfd)
301
302     def _generate_pod_yaml(self):  # pragma: no cover
303         context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
304         # convert OrderedDict to a list
305         # pod.yaml nodes is a list
306         nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
307         pod_dict = {
308             "nodes": nodes,
309             "networks": self.context_cfg["networks"]
310         }
311         with open(context_yaml, "w") as context_out:
312             yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
313                            explicit_start=True)
314
315     @staticmethod
316     def _serialize_node(node):  # pragma: no cover
317         new_node = copy.deepcopy(node)
318         # name field is required
319         # remove context suffix
320         new_node["name"] = node['name'].split('.')[0]
321         try:
322             new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
323         except KeyError:
324             pass
325         return new_node
326
327     def map_topology_to_infrastructure(self):
328         """ This method should verify if the available resources defined in pod.yaml
329         match the topology.yaml file.
330
331         :return: None. Side effect: context_cfg is updated
332         """
333         # 3. Use topology file to find connections & resolve dest address
334         self._resolve_topology()
335         self._update_context_with_topology()
336
337     @classmethod
338     def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
339         """ Find the implementing class from vnf_model["vnf"]["name"] field
340
341         :param vnf_model_id: parsed vnfd model ID field
342         :return: subclass of GenericVNF
343         """
344         utils.import_modules_from_package(
345             "yardstick.network_services.vnf_generic.vnf")
346         expected_name = vnf_model_id
347         classes_found = []
348
349         def impl():
350             for name, class_ in ((c.__name__, c) for c in
351                                  utils.itersubclasses(GenericVNF)):
352                 if name == expected_name:
353                     yield class_
354                 classes_found.append(name)
355
356         try:
357             return next(impl())
358         except StopIteration:
359             pass
360
361         message = ('No implementation for %s found in %s'
362                    % (expected_name, classes_found))
363         raise exceptions.IncorrectConfig(error_msg=message)
364
365     @staticmethod
366     def create_interfaces_from_node(vnfd, node):  # pragma: no cover
367         ext_intfs = vnfd["vdu"][0]["external-interface"] = []
368         # have to sort so xe0 goes first
369         for intf_name, intf in sorted(node['interfaces'].items()):
370             # only interfaces with vld_id are added.
371             # Thus there are two layers of filters, only intefaces with vld_id
372             # show up in interfaces, and only interfaces with traffic profiles
373             # are used by the generators
374             if intf.get('vld_id'):
375                 # force dpkd_port_num to int so we can do reverse lookup
376                 try:
377                     intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
378                 except KeyError:
379                     pass
380                 ext_intf = {
381                     "name": intf_name,
382                     "virtual-interface": intf,
383                     "vnfd-connection-point-ref": intf_name,
384                 }
385                 ext_intfs.append(ext_intf)
386
    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        Every context node carrying a "VNF model" entry gets its model file
        rendered through vnfdgen, its external interfaces populated from
        the (already resolved) node interfaces, and the matching GenericVNF
        subclass instantiated.

        :param scenario_cfg: scenario configuration; defaults to
            ``self.scenario_cfg``
        :param context_cfg: context configuration; defaults to
            ``self.context_cfg``
        :return: list of VNF instances (also stored in ``self.vnfs``)
        """
        # put the TRex client library first on sys.path, removing any
        # duplicate entry, so the VNF modules import the bundled version
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                # nodes without a model (e.g. plain hosts) are not VNFs
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            # render the model template with the node's own data
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            # the vnfd 'id' selects the GenericVNF subclass to instantiate
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs
433
    def setup(self):
        """Setup infrastructure, provision VNFs & start traffic

        Order matters throughout: topology must be resolved before models
        load, traffic generators are instantiated before the other VNFs,
        and listeners start before traffic is generated.
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            # bare except on purpose: any failure (including BaseException)
            # must terminate every VNF before the error is re-raised
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())
478
479     def get_mq_ids(self):  # pragma: no cover
480         """Return stored MQ producer IDs"""
481         return self._mq_ids
482
483     def run(self, result):  # yardstick API
484         """ Yardstick calls run() at intervals defined in the yaml and
485             produces timestamped samples
486
487         :param result: dictionary with results to update
488         :return: None
489         """
490
491         # this is the only method that is check from the runner
492         # so if we have any fatal error it must be raised via these methods
493         # otherwise we will not terminate
494
495         result.update(self.collector.get_kpi())
496
    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        Any failure is converted into a plain RuntimeError so nothing
        unpicklable crosses the multiprocessing boundary.

        :return
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                # reap leftover child processes even when a terminate() failed
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")
519
520     def pre_run_wait_time(self, time_seconds):  # pragma: no cover
521         """Time waited before executing the run method"""
522         time.sleep(time_seconds)
523
524     def post_run_wait_time(self, time_seconds):  # pragma: no cover
525         """Time waited after executing the run method"""
526         pass