yardstick/benchmark/scenarios/networking/vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import ipaddress
from itertools import chain
import logging
import os
import sys
import time

import six
import yaml

from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh

traffic_profile.register_modules()


LOG = logging.getLogger(__name__)


class NetworkServiceTestCase(scenario_base.Scenario):
46     """Class handles Generic framework to do pre-deployment VNF &
47        Network service testing  """
48
49     __scenario_type__ = "NSPerf"
50
51     def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
52         super(NetworkServiceTestCase, self).__init__()
53         self.scenario_cfg = scenario_cfg
54         self.context_cfg = context_cfg
55
56         self._render_topology()
57         self.vnfs = []
58         self.collector = None
59         self.traffic_profile = None
60         self.node_netdevs = {}
61         self.bin_path = get_nsb_option('bin_path', '')
62
63     def _get_ip_flow_range(self, ip_start_range):
64
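        # Illustrative only: the two input shapes this helper accepts (the
        # node/interface names and addresses are examples, not real values):
        #   ip_start_range = '10.0.0.2-10.0.0.254'   # explicit range, returned as-is
        #   ip_start_range = {'tg__0': 'xe0'}        # node/interface lookup; the range
        #                                            # is derived from that interface's
        #                                            # local_ip/netmask subnet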
        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case it is the gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

    def _get_traffic_flow(self):
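        # Sketch of the scenario options consumed here; the keys match the
        # lookups below, the values are illustrative only:
        #   options:
        #     flow:
        #       src_ip: [{'tg__0': 'xe0'}]
        #       dst_ip: [{'tg__0': 'xe1'}]
        #       count: 1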
        flow = {}
        try:
            # TODO: should be .0 or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
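        # Illustrative 'framesize' options block (values are examples only,
        # not defaults):
        #   framesize:
        #     uplink: {64B: 100}
        #     downlink: {64B: 100}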
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_duration(self):
        options = self.scenario_cfg.get('options', {})
        return options.get('duration',
                           tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)

    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration()}
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)

    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0/xe1, the key may be the vld_id (e.g. uplink_0, downlink_0);
            # pop it and re-insert it under the correct name from the topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
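        # Assumed shape of a single 'vld' entry as consumed below (values are
        # illustrative):
        #   {'id': 'uplink_0',
        #    'vnfd-connection-point-ref': [
        #        {'member-vnf-index-ref': '1', 'vnfd-connection-point-ref': 'xe0'},
        #        {'member-vnf-index-ref': '2', 'vnfd-connection-point-ref': 'xe0'}]}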
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
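        # Rough shape of the generated file (illustrative, not a complete
        # pod.yaml):
        #   ---
        #   nodes:
        #     - name: vnf__0
        #       role: vnf
        #       ip: 10.10.10.10
        #   networks: {}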
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """ Verify that the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)

    @staticmethod
    def create_interfaces_from_node(vnfd, node):
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # Only interfaces with a vld_id are added.
            # Thus there are two layers of filters: only interfaces with a
            # vld_id show up here, and only interfaces with traffic profiles
            # are used by the generators.
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on VNF model YAML descriptors

        :param scenario_cfg: scenario configuration (defaults to self.scenario_cfg)
        :param context_cfg: context configuration (defaults to self.context_cfg)
        :return: list of VNF instances
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize on Heat using pkey as a string so we don't
            # rely on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Set up infrastructure, provision VNFs and start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here, after the VNFs have probed,
        # so we know the vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method checked by the runner, so any fatal
        # error must be raised here, otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instances

        :return:
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):
        """Time to wait before executing the run method"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):
        """Time to wait after executing the run method (not used by this scenario)"""
        pass