Merge "Prox standalone test case changes:"
yardstick/benchmark/scenarios/networking/vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import ipaddress
from itertools import chain
import logging
import os
import sys
import time

import six
import yaml

from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh


traffic_profile.register_modules()


LOG = logging.getLogger(__name__)


class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework for pre-deployment VNF & network service testing"""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        self._mq_ids = []

    def is_ended(self):
        return self.traffic_profile is not None and self.traffic_profile.is_ended()

    def _get_ip_flow_range(self, ip_start_range):
        """Retrieve the first and last viable IPs of a CIDR

        :param ip_start_range: either the IP range itself (a string) or a
               dictionary mapping a node name to an interface name or range.
        :return: (str) IP range (min, max) in the format "x.x.x.x-y.y.y.y"
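
        Example (illustrative): an interface whose local_ip is '10.0.0.5'
        with netmask '255.255.255.0' resolves to '10.0.0.2-10.0.0.254',
        while a plain string such as '1.1.1.1-1.1.1.10' is returned as-is.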
        """
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()),
                                             (None, '0.0.0.0'))
        if node_name is None:
            return range_or_interface

        node = self.context_cfg['nodes'].get(node_name, {})
        interface = node.get('interfaces', {}).get(range_or_interface)
        if interface:
            ip = interface['local_ip']
            mask = interface['netmask']
        else:
            ip = '0.0.0.0'
            mask = '255.255.255.0'

        ipaddr = ipaddress.ip_network(
            six.text_type('{}/{}'.format(ip, mask)), strict=False)
        if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
            ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
        else:
            LOG.warning('Only single IP in range %s', ipaddr)
            ip_addr_range = ip
        return ip_addr_range

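    # _get_traffic_flow flattens the scenario "flow" options into indexed
    # keys consumed by the traffic profile template. A minimal illustrative
    # sketch (node/interface names and values are hypothetical):
    #   options: {"flow": {"src_ip": [{"tg__0": "xe0"}],
    #                      "dst_ip": ["10.0.3.2-10.0.3.254"], "count": 1}}
    #   result:  {"flow": {"src_ip_0": "10.0.2.2-10.0.2.254",
    #                      "dst_ip_0": "10.0.3.2-10.0.3.254", "count": 1}}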
    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0  or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            if "count" in fflow:
                flow["count"] = fflow["count"]

            if "srcseed" in fflow:
                flow["srcseed"] = fflow["srcseed"]

            if "dstseed" in fflow:
                flow["dstseed"] = fflow["dstseed"]

        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_duration(self):
        options = self.scenario_cfg.get('options', {})
        return options.get('duration',
                           tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)

    def _key_list_to_dict(self, key, value_list):
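        """Expand a list stored under ``key`` into indexed keys.

        Illustrative example: with key "uplink" and value_list
        {"uplink": [10, 20]}, the result is {"uplink_0": 10, "uplink_1": 20};
        a missing key yields an empty dict.
        """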
        value_dict = {}
        try:
            for index, count in enumerate(value_list[key]):
                value_dict["{}_{}".format(key, index)] = count
        except KeyError:
            value_dict = {}

        return value_dict

    def _get_simulated_users(self):
        users = self.scenario_cfg.get("options", {}).get("simulated_users", {})
        simulated_users = self._key_list_to_dict("uplink", users)
        return {"simulated_users": simulated_users}

    def _get_page_object(self):
        objects = self.scenario_cfg.get("options", {}).get("page_object", {})
        page_object = self._key_list_to_dict("uplink", objects)
        return {"page_object": page_object}

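    # _fill_traffic_profile renders the traffic profile template with the
    # values gathered above (flow, imix, duration, simulated_users, ...) and
    # then applies any "traffic_config" overrides from the scenario options.
    # Sketch of such an override (keys and values are hypothetical):
    #   options:
    #     traffic_config: {duration: 30}
    #   -> traffic_vnfd["traffic_profile"]["duration"] == 30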
    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration(),
            'page_object': self._get_page_object(),
            'simulated_users': self._get_simulated_users()}
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)

        traffic_config = \
            self.scenario_cfg.get("options", {}).get("traffic_config", {})

        traffic_vnfd.setdefault("traffic_profile", {})
        traffic_vnfd["traffic_profile"].update(traffic_config)

        self.traffic_profile = \
            tprofile_base.TrafficProfile.get(traffic_vnfd)

    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
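        """Return the interface dict for ``if_name`` on node ``name``.

        Interfaces may be keyed by port name (e.g. ``xe0``) or by vld_id
        (e.g. ``uplink_0``); in the latter case the entry is re-keyed to
        ``if_name`` so the topology name is used from then on.
        """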
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id, uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

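    # _resolve_topology walks every virtual link ("vld") in the topology and
    # cross-populates both endpoint interfaces with peer data (dst_mac,
    # dst_ip, peer_name, peer_ifname, vld_id, network). Sketch of one vld
    # entry as consumed below (field values are hypothetical):
    #   - id: uplink_0
    #     vnfd-connection-point-ref:
    #       - {member-vnf-index-ref: '1', vnfd-connection-point-ref: xe0}
    #       - {member-vnf-index-ref: '2', vnfd-connection-point-ref: xe0}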
    def _resolve_topology(self):
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):  # pragma: no cover
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):  # pragma: no cover
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):  # pragma: no cover
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """Verify that the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
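
        Illustrative example: a vnfd ``id`` of ``TrexTrafficGen`` selects the
        GenericVNF subclass of that exact name, assuming such a class exists
        under yardstick.network_services.vnf_generic.vnf.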
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)

    @staticmethod
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

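    # load_vnf_models expects each context node that should become a VNF or
    # traffic generator to carry a "VNF model" key pointing at a VNFD
    # template, e.g. (hypothetical pod/task snippet):
    #   nodes:
    #     vnf__0:
    #       VNF model: ../../vnf_descriptors/some_vnfd_template.yaml
    # Nodes without that key are skipped.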
    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration (defaults to self.scenario_cfg)
        :param context_cfg: context configuration (defaults to self.context_cfg)
        :return: list of instantiated GenericVNF objects
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """Setup infrastructure, provision VNFs & start traffic"""
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())

    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs"""
        return self._mq_ids

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the YAML and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner,
        # so if we have any fatal error it must be raised via this method
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return: None
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited before executing the run method"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited after executing the run method"""
        pass