Merge "Make security group configurable - dovetail"
yardstick/benchmark/scenarios/networking/vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import ipaddress
from itertools import chain
import logging
import os
import sys
import time

import six
import yaml

from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services import traffic_profile
from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh


traffic_profile.register_modules()


LOG = logging.getLogger(__name__)

class NetworkServiceTestCase(scenario_base.Scenario):
    """Generic framework for pre-deployment VNF & network service testing."""

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        self._render_topology()
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        self.node_netdevs = {}
        self.bin_path = get_nsb_option('bin_path', '')
        self._mq_ids = []

    def _get_ip_flow_range(self, ip_start_range):
        """Retrieve the first and last viable IPs of a CIDR

        :param ip_start_range: either the IP range itself or a dictionary
               mapping a node name to an interface (port) name.
        :return: (str) IP range (min, max) in the format "x.x.x.x-y.y.y.y"
        """
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()),
                                             (None, '0.0.0.0'))
        if node_name is None:
            return range_or_interface

        node = self.context_cfg['nodes'].get(node_name, {})
        interface = node.get('interfaces', {}).get(range_or_interface)
        if interface:
            ip = interface['local_ip']
            mask = interface['netmask']
        else:
            ip = '0.0.0.0'
            mask = '255.255.255.0'

        ipaddr = ipaddress.ip_network(
            six.text_type('{}/{}'.format(ip, mask)), strict=False)
        if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
            ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
        else:
            LOG.warning('Only single IP in range %s', ipaddr)
            ip_addr_range = ip
        return ip_addr_range

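    # Illustrative sketch (editor's addition, not part of the original code):
    # how _get_ip_flow_range() resolves a {node: interface} mapping.  The node
    # name 'tg__0' and the interface values below are hypothetical.
    #
    #   context_cfg['nodes']['tg__0']['interfaces']['xe0'] = {
    #       'local_ip': '10.0.0.5', 'netmask': '255.255.255.0'}
    #
    #   self._get_ip_flow_range({'tg__0': 'xe0'})
    #   # ipaddress.ip_network('10.0.0.5/255.255.255.0', strict=False)
    #   # gives 10.0.0.0/24, so the returned range is '10.0.0.2-10.0.0.254'
    #
    #   self._get_ip_flow_range('192.168.0.10-192.168.0.100')
    #   # plain strings are returned unchanged
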
    def _get_traffic_flow(self):
        flow = {}
        try:
            # TODO: should be .0 or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            if "count" in fflow:
                flow["count"] = fflow["count"]

            if "seed" in fflow:
                flow["seed"] = fflow["seed"]

        except KeyError:
            flow = {}
        return {"flow": flow}

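    # Illustrative sketch (editor's addition): the scenario 'options.flow'
    # section consumed by _get_traffic_flow(); the addresses and count below
    # are hypothetical.
    #
    #   options:
    #     flow:
    #       src_ip: [{'tg__0': 'xe0'}]
    #       dst_ip: [{'tg__0': 'xe1'}]
    #       count: 1
    #
    # would produce roughly:
    #
    #   {'flow': {'src_ip_0': '10.0.0.2-10.0.0.254',
    #             'dst_ip_0': '10.0.1.2-10.0.1.254',
    #             'count': 1}}
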
    def _get_traffic_imix(self):
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(profile, path) as infile:
            return infile.read()

    def _get_duration(self):
        options = self.scenario_cfg.get('options', {})
        return options.get('duration',
                           tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)

    def _fill_traffic_profile(self):
        tprofile = self._get_traffic_profile()
        extra_args = self.scenario_cfg.get('extra_args', {})
        tprofile_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': extra_args,
            'duration': self._get_duration()}
        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)

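    # Illustrative sketch (editor's addition): the scenario options read by
    # _get_traffic_imix() and _get_duration(); the frame sizes and duration
    # below are hypothetical.
    #
    #   options:
    #     framesize:
    #       uplink: {64B: 100}
    #       downlink: {64B: 100}
    #     duration: 30
    #
    # _fill_traffic_profile() merges these (plus 'flow' and 'extra_args') into
    # the traffic profile template, and TrafficProfile.get() then selects the
    # matching profile implementation from the rendered descriptor.
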
    def _get_topology(self):
        topology = self.scenario_cfg["topology"]
        path = self.scenario_cfg["task_path"]
        with utils.open_relative_file(topology, path) as infile:
            return infile.read()

    def _render_topology(self):
        topology = self._get_topology()
        topology_args = self.scenario_cfg.get('extra_args', {})
        topology_data = {
            'extra_args': topology_args
        }
        topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

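    # Illustrative sketch (editor's addition): minimal shape of the rendered
    # topology this scenario expects; the ids and names are hypothetical.
    # _render_topology() keeps only the first NSD entry of the catalog.
    #
    #   nsd:nsd-catalog:
    #     nsd:
    #       - id: 2node-topology
    #         constituent-vnfd:
    #           - member-vnf-index: '1'
    #             vnfd-id-ref: tg__0
    #           - member-vnf-index: '2'
    #             vnfd-id-ref: vnf__0
    #         vld:
    #           - id: uplink_0
    #             vnfd-connection-point-ref:
    #               - member-vnf-index-ref: '1'
    #                 vnfd-connection-point-ref: xe0
    #               - member-vnf-index-ref: '2'
    #                 vnfd-connection-point-ref: xe0
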
    def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        return next((vnfd
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # not found by interface name; it may be keyed by vld_id
            # (uplink_0, downlink_0), so pop it and re-insert it under the
            # correct name from the topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

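    # Illustrative sketch (editor's addition): the find_node_if() fallback,
    # with a hypothetical node whose interfaces are keyed by vld_id.
    #
    #   nodes['vnf__0']['interfaces'] == {'uplink_0': {'local_ip': ...}}
    #   find_node_if(nodes, 'vnf__0', 'xe0', 'uplink_0')
    #   # pops the 'uplink_0' entry, re-inserts it as 'xe0' and returns it
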
    def _resolve_topology(self):
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = {n.get('vld_id', name): n for name, n in
                                self.context_cfg["networks"].items()}

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                              'corrupted')

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                              'for connection')

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

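    # Illustrative sketch (editor's addition): after _resolve_topology(), each
    # interface referenced by a VLD carries, in addition to its original
    # local_* fields, the keys set above (the values here are hypothetical):
    #
    #   node0_if = {
    #       'local_mac': '00:00:00:00:00:01', 'local_ip': '10.0.0.1',
    #       'ifname': 'xe0',            # topology port name
    #       'node_name': 'tg__0',
    #       'vld_id': 'uplink_0',
    #       'network': {...},           # matching entry from context networks
    #       'peer_name': 'vnf__0',
    #       'peer_ifname': 'xe0',
    #       'dst_mac': '00:00:00:00:00:02',   # peer's local_mac
    #       'dst_ip': '10.0.1.1',             # peer's local_ip
    #       'peer_intf': {...},         # shallow copy of the peer interface
    #   }
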
    def _update_context_with_topology(self):  # pragma: no cover
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):  # pragma: no cover
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

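    # Illustrative sketch (editor's addition): rough shape of the generated
    # pod-<task_id>.yaml; the node fields shown are hypothetical apart from
    # 'name' and 'pkey', which _serialize_node() normalizes.
    #
    #   ---
    #   nodes:
    #     - name: tg__0                  # context suffix stripped
    #       ip: 10.10.10.10
    #       pkey: <private key as a string>
    #   networks: {}
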
    @staticmethod
    def _serialize_node(node):  # pragma: no cover
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    def map_topology_to_infrastructure(self):
        """Verify that the available resources defined in pod.yaml match the
        topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """Find the implementing class for the parsed VNFD 'id' field

        :param vnf_model_id: parsed VNFD model ID field
        :return: subclass of GenericVNF
        """
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in
                                 utils.itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        message = ('No implementation for %s found in %s'
                   % (expected_name, classes_found))
        raise exceptions.IncorrectConfig(error_msg=message)

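    # Illustrative sketch (editor's addition): get_vnf_impl() matches the
    # VNFD 'id' against the class names of GenericVNF subclasses found under
    # yardstick.network_services.vnf_generic.vnf, e.g. (assuming a subclass of
    # that name exists):
    #
    #   vnf_impl = cls.get_vnf_impl('TrexTrafficGen')
    #   # returns the class named 'TrexTrafficGen'; an unknown id raises
    #   # IncorrectConfig listing every subclass name that was inspected
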
    @staticmethod
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters: only interfaces with
            # vld_id show up in interfaces, and only interfaces with traffic
            # profiles are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

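    # Illustrative sketch (editor's addition): how a node interface that
    # carries a vld_id is mirrored into the VNFD; the values are hypothetical.
    #
    #   node['interfaces']['xe0'] = {'vld_id': 'uplink_0',
    #                                'dpdk_port_num': '0'}
    #
    # becomes
    #
    #   vnfd['vdu'][0]['external-interface'] == [
    #       {'name': 'xe0',
    #        'virtual-interface': {'vld_id': 'uplink_0', 'dpdk_port_num': 0},
    #        'vnfd-connection-point-ref': 'xe0'}]
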
    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario configuration; defaults to
               self.scenario_cfg
        :param context_cfg: context configuration; defaults to
               self.context_cfg
        :return: list of instantiated VNF objects
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

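    # Illustrative sketch (editor's addition): a context node only becomes a
    # VNF object when it declares a "VNF model" descriptor; the node name and
    # path below are hypothetical.
    #
    #   nodes:
    #     vnf__0:
    #       VNF model: ../../vnf_descriptors/some_vnf.yaml
    #       interfaces: {...}
    #
    # The descriptor is rendered with the node itself as template data, the
    # node's pkey (if any) is injected into its mgmt-interface, and the
    # implementing class is selected from the rendered 'id' via get_vnf_impl().
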
    def setup(self):
        """Set up infrastructure, provision VNFs & start traffic"""
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except:
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here, after the VNFs have probed, so we
        # know the vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())

    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs"""
        return self._mq_ids

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner,
        # so any fatal error must be raised via these methods,
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instances

        :return: None
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert it to a simple
            # exception; never pass exceptions back to multiprocessing,
            # because some exceptions can be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time to wait before executing the run method"""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time to wait after executing the run method"""
        pass