# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import ipaddress
from itertools import chain
import logging
import os
import sys
import time

import six
import yaml

from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh


traffic_profile.register_modules()


LOG = logging.getLogger(__name__)


class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework for pre-deployment VNF and network service testing"""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        self._mq_ids = []

    def _get_ip_flow_range(self, ip_start_range):
        """Retrieve the first and last viable IPs of a CIDR range

        :param ip_start_range: either the IP range itself or a dictionary
               with the node name and the interface name.
        :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
        """
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()),
                                             (None, '0.0.0.0'))
        if node_name is None:
            return range_or_interface

        node = self.context_cfg['nodes'].get(node_name, {})
        interface = node.get('interfaces', {}).get(range_or_interface)
        if interface:
            ip = interface['local_ip']
            mask = interface['netmask']
        else:
            ip = '0.0.0.0'
            mask = '255.255.255.0'

        ipaddr = ipaddress.ip_network(
            six.text_type('{}/{}'.format(ip, mask)), strict=False)
        if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
            ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
        else:
            LOG.warning('Only single IP in range %s', ipaddr)
            ip_addr_range = ip
        return ip_addr_range
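        # Illustrative example (node and interface names are assumptions):
        # given {'tg__0': 'xe0'} and an interface with local_ip '10.0.0.5'
        # and netmask '255.255.255.0', the network is 10.0.0.0/24 and the
        # returned range is '10.0.0.2-10.0.0.254' (network address, first
        # host and broadcast address are excluded). A plain string such as
        # '10.0.0.2-10.0.0.254' is returned unchanged.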

    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0 or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            if "count" in fflow:
                flow["count"] = fflow["count"]

            if "srcseed" in fflow:
                flow["srcseed"] = fflow["srcseed"]

            if "dstseed" in fflow:
                flow["dstseed"] = fflow["dstseed"]

        except KeyError:
            flow = {}
        return {"flow": flow}
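        # Illustrative example (values are assumptions): a test case with
        #   options:
        #     flow:
        #       src_ip: ['152.16.100.20-152.16.100.30']
        #       dst_ip: ['152.16.40.20-152.16.40.30']
        #       count: 10
        # yields {'flow': {'src_ip_0': '152.16.100.20-152.16.100.30',
        #                  'dst_ip_0': '152.16.40.20-152.16.40.30',
        #                  'count': 10}}.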

    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_duration(self):
        options = self.scenario_cfg.get('options', {})
        return options.get('duration',
                           tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)

    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration()}

        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)

        traffic_config = \
            self.scenario_cfg.get("options", {}).get("traffic_config", {})

        traffic_vnfd.setdefault("traffic_profile", {})
        traffic_vnfd["traffic_profile"].update(traffic_config)

        self.traffic_profile = \
            tprofile_base.TrafficProfile.get(traffic_vnfd)
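        # Any keys under the test case's options.traffic_config block
        # override the rendered profile's "traffic_profile" section key by
        # key. Illustrative example (key and value are assumptions):
        #   options:
        #     traffic_config:
        #       duration: 30
        # replaces the duration defined in the traffic profile template.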

    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe keyed by vld_id: uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf
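        # Illustrative example (names are assumptions): if the context keys
        # the interface by its vld_id, e.g.
        # nodes['vnf__0']['interfaces']['uplink_0'], while the topology
        # refers to it as 'xe0', the entry is re-keyed to 'xe0' and returned.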

    def _resolve_topology(self):
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):  # pragma: no cover
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):  # pragma: no cover
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):  # pragma: no cover
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node
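        # Illustrative example (node name is an assumption): a context node
        # named 'vnf__0.yardstick-abcdef12' is serialized with name
        # 'vnf__0', and its 'pkey' object, if present, is converted to a
        # string so the generated pod.yaml is self-contained.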

    def map_topology_to_infrastructure(self):
        """Verify that the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)
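        # Illustrative example (class name is hypothetical): a vnfd whose
        # 'id' field is 'MyTrafficGen' resolves to the GenericVNF subclass
        # named MyTrafficGen, provided it is importable from
        # yardstick.network_services.vnf_generic.vnf.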

    @staticmethod
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration; defaults to self.scenario_cfg
        :param context_cfg: context configuration; defaults to self.context_cfg
        :return: list of instantiated VNF objects
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize on Heat using pkey as a string so we don't
            # rely on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs
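        # Illustrative context node entry (file name is hypothetical):
        #   vnf__0:
        #     VNF model: ../../vnf_descriptors/my_vnf_vnfd.yaml
        # The referenced descriptor is rendered with the node's values and
        # instantiated via the GenericVNF subclass matching its 'id' field.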

    def setup(self):
        """Setup infrastructure, provision VNFs & start traffic"""
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here, after the VNFs have probed,
        # so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())

    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs"""
        return self._mq_ids

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instances

        :return: None
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
        """Wait for the given number of seconds before executing run()"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Wait time after executing run(); currently a no-op"""
        pass