[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import ipaddress
from itertools import chain
import logging
import os
import sys
import time

import six
import yaml

from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh


traffic_profile.register_modules()


LOG = logging.getLogger(__name__)


class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework for pre-deployment VNF and network service testing"""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        self._mq_ids = []

    def _get_ip_flow_range(self, ip_start_range):

        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

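    # Example (illustrative): the "flow" options accept src_ip/dst_ip entries either as a
    # literal 'x.x.x.x-y.y.y.y' range string or as a single-entry node/interface mapping
    # resolved against context_cfg, e.g. assuming a traffic generator node 'tg__0' with
    # an interface 'xe0':
    #
    #     src_ip:
    #       - '10.0.0.2-10.0.0.254'   # returned unchanged
    #       - {'tg__0': 'xe0'}        # range derived from the interface local_ip/netmask,
    #                                 # skipping the first host to leave room for a gateway
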
    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0 or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

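    # Example (illustrative, node and interface names are hypothetical): a scenario
    # "options" block such as
    #
    #     flow:
    #       src_ip: [{'tg__0': 'xe0'}]
    #       dst_ip: [{'tg__0': 'xe1'}]
    #       count: 1
    #
    # yields {"flow": {"src_ip_0": ..., "dst_ip_0": ..., "count": 1}}; if the "flow"
    # key is absent, an empty flow dict is returned instead.
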
    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_duration(self):
        options = self.scenario_cfg.get('options', {})
        return options.get('duration',
                           tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)

    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration()}
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)

    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id, uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

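    # Note: after _resolve_topology() runs, each interface referenced by a VLD carries
    # the keys ifname, node_name, vld_id, peer_name, peer_ifname, network, dst_mac,
    # dst_ip and peer_intf, so later stages can look up either end of a link from the
    # other.
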
    def _update_context_with_topology(self):  # pragma: no cover
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):  # pragma: no cover
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

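    # Note: the generated file is written to <LOG_DIR>/pod-<task_id>.yaml and contains a
    # "nodes" list plus a "networks" mapping, i.e. the same shape as the pod.yaml nodes
    # file that was used as input.
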
    @staticmethod
    def _serialize_node(node):  # pragma: no cover
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """ Verify that the resources available in pod.yaml match the topology
        file, and update the context accordingly.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)

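    # Note: the lookup above matches the vnfd 'id' field against the __name__ of every
    # GenericVNF subclass importable from yardstick.network_services.vnf_generic.vnf, so
    # the descriptor id must spell the implementing Python class name exactly.
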
    @staticmethod
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration; defaults to self.scenario_cfg
        :param context_cfg: context configuration; defaults to self.context_cfg
        :return: list of instantiated VNF objects
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

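    # Example (illustrative, field values are hypothetical): a node is only turned into a
    # VNF object when its pod entry carries a "VNF model" key pointing at a vnfd template,
    # resolved relative to the scenario task_path, e.g.
    #
    #     - name: vnf__0
    #       VNF model: "../../vnf_descriptors/some_vnf.yaml"
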
    def setup(self):
        """Setup infrastructure, provision VNFs & start traffic"""
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause the VNF application to exit,
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except Exception:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here, after the VNFs have probed,
        # so we know the vpci and driver values
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())

    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs"""
        return self._mq_ids

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner,
        # so if we have any fatal error it must be raised via this method;
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instances

        :return: None
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited before executing the run method"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited after executing the run method"""
        pass