Add extra arguments to the traffic profile rendering
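
Among other cleanups, this change forwards the scenario's optional
'extra_args' mapping, together with any per-flow src_port/dst_port lists,
into the data used to render the traffic profile template, so that values
supplied in the scenario can be referenced from the profile.

A minimal sketch of the resulting template data, assuming an illustrative
scenario configuration (the file name, path and extra_args keys below are
made-up examples, not part of this change):

    scenario_cfg = {
        "traffic_profile": "ipv4_throughput.yaml",        # assumed example file
        "task_path": "samples/vnf_samples/nsut",          # assumed example path
        "extra_args": {"vlan_id": 100, "frame_rate": 50}, # user-supplied knobs
    }

    extra_args = scenario_cfg.get("extra_args", {})
    tprofile_data = {
        "flow": {},                # built by _get_traffic_flow()
        "imix": {},                # built by _get_traffic_imix()
        "uplink": {},              # TrafficProfile.UPLINK
        "downlink": {},            # TrafficProfile.DOWNLINK
        "extra_args": extra_args,  # new: forwarded verbatim to the template
    }
    # tprofile_data is then passed to vnfdgen.generate_vnfd(tprofile, tprofile_data)
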
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 3f61116..c43dabf 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" NSPerf specific scenario definition """
-
-from __future__ import absolute_import
 
+from collections import defaultdict
+import copy
 import logging
-import errno
-
 import ipaddress
+from itertools import chain
 import os
-import sys
 import re
-from itertools import chain
+import sys
 
 import six
 import yaml
-from collections import defaultdict
 
-from yardstick.benchmark.scenarios import base
+from yardstick.benchmark.scenarios import base as scenario_base
 from yardstick.common.constants import LOG_DIR
-from yardstick.common.utils import import_modules_from_package, itersubclasses
+from yardstick.common.process import terminate_children
+from yardstick.common import utils
 from yardstick.common.yaml_loader import yaml_load
 from yardstick.network_services.collector.subscriber import Collector
 from yardstick.network_services.vnf_generic import vnfdgen
 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
-from yardstick.network_services.traffic_profile.base import TrafficProfile
+from yardstick.network_services import traffic_profile
+from yardstick.network_services.traffic_profile import base as tprofile_base
 from yardstick.network_services.utils import get_nsb_option
 from yardstick import ssh
 
 
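+# register all available traffic profile implementations at import time,
+# before any profile is looked up via TrafficProfile.get()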
+traffic_profile.register_modules()
+
+
 LOG = logging.getLogger(__name__)
 
 
@@ -88,35 +89,7 @@ class SshManager(object):
             self.conn.close()
 
 
-def find_relative_file(path, task_path):
-    """
-    Find file in one of places: in abs of path or
-    relative to TC scenario file. In this order.
-
-    :param path:
-    :param task_path:
-    :return str: full path to file
-    """
-    # fixme: create schema to validate all fields have been provided
-    for lookup in [os.path.abspath(path), os.path.join(task_path, path)]:
-        try:
-            with open(lookup):
-                return lookup
-        except IOError:
-            pass
-    raise IOError(errno.ENOENT, 'Unable to find {} file'.format(path))
-
-
-def open_relative_file(path, task_path):
-    try:
-        return open(path)
-    except IOError as e:
-        if e.errno == errno.ENOENT:
-            return open(os.path.join(task_path, path))
-        raise
-
-
-class NetworkServiceTestCase(base.Scenario):
+class NetworkServiceTestCase(scenario_base.Scenario):
     """Class handles Generic framework to do pre-deployment VNF &
        Network service testing  """
 
@@ -128,8 +101,8 @@ class NetworkServiceTestCase(base.Scenario):
         self.context_cfg = context_cfg
 
         # fixme: create schema to validate all fields have been provided
-        with open_relative_file(scenario_cfg["topology"],
-                                scenario_cfg['task_path']) as stream:
+        with utils.open_relative_file(scenario_cfg["topology"],
+                                      scenario_cfg['task_path']) as stream:
             topology_yaml = yaml_load(stream)
 
         self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
@@ -187,6 +160,12 @@ class NetworkServiceTestCase(base.Scenario):
             for index, publicip in enumerate(fflow.get("public_ip", [])):
                 flow["public_ip_{}".format(index)] = publicip
 
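+            # expose each configured flow port to the traffic profile
+            # template as src_port_<index> / dst_port_<index>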
+            for index, src_port in enumerate(fflow.get("src_port", [])):
+                flow["src_port_{}".format(index)] = src_port
+
+            for index, dst_port in enumerate(fflow.get("dst_port", [])):
+                flow["dst_port_{}".format(index)] = dst_port
+
             flow["count"] = fflow["count"]
         except KeyError:
             flow = {}
@@ -202,21 +181,22 @@ class NetworkServiceTestCase(base.Scenario):
     def _get_traffic_profile(self):
         profile = self.scenario_cfg["traffic_profile"]
         path = self.scenario_cfg["task_path"]
-        with open_relative_file(profile, path) as infile:
+        with utils.open_relative_file(profile, path) as infile:
             return infile.read()
 
     def _fill_traffic_profile(self):
-        traffic_mapping = self._get_traffic_profile()
-        traffic_map_data = {
+        tprofile = self._get_traffic_profile()
+        extra_args = self.scenario_cfg.get('extra_args', {})
+        tprofile_data = {
             'flow': self._get_traffic_flow(),
             'imix': self._get_traffic_imix(),
-            TrafficProfile.UPLINK: {},
-            TrafficProfile.DOWNLINK: {},
+            tprofile_base.TrafficProfile.UPLINK: {},
+            tprofile_base.TrafficProfile.DOWNLINK: {},
+            'extra_args': extra_args
         }
 
-        traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
-        self.traffic_profile = TrafficProfile.get(traffic_vnfd)
-        return self.traffic_profile
+        traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
+        self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
 
     def _find_vnf_name_from_id(self, vnf_id):
         return next((vnfd["vnfd-id-ref"]
@@ -375,13 +355,7 @@ class NetworkServiceTestCase(base.Scenario):
         context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
         # convert OrderedDict to a list
         # pod.yaml nodes is a list
-        nodes = []
-        for node in self.context_cfg["nodes"].values():
-            # name field is required
-            # remove context suffix
-            node['name'] = node['name'].split('.')[0]
-            nodes.append(node)
-        nodes = self._convert_pkeys_to_string(nodes)
+        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
         pod_dict = {
             "nodes": nodes,
             "networks": self.context_cfg["networks"]
@@ -391,15 +365,16 @@ class NetworkServiceTestCase(base.Scenario):
                            explicit_start=True)
 
     @staticmethod
-    def _convert_pkeys_to_string(nodes):
-        # make copy because we are mutating
-        nodes = nodes[:]
-        for i, node in enumerate(nodes):
-            try:
-                nodes[i] = dict(node, pkey=ssh.convert_key_to_str(node["pkey"]))
-            except KeyError:
-                pass
-        return nodes
+    def _serialize_node(node):
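+        # operate on a deep copy so the node dict shared via context_cfg
+        # is not mutated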
+        new_node = copy.deepcopy(node)
+        # name field is required
+        # remove context suffix
+        new_node["name"] = node['name'].split('.')[0]
+        try:
+            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
+        except KeyError:
+            pass
+        return new_node
 
     TOPOLOGY_REQUIRED_KEYS = frozenset({
         "vpci", "local_ip", "netmask", "local_mac", "driver"})
@@ -447,12 +422,14 @@ class NetworkServiceTestCase(base.Scenario):
         self._resolve_topology()
         self._update_context_with_topology()
 
-    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
-$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
-$1/device/subsystem_vendor $1/device/subsystem_device ; \
-printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
-' sh  \{\}/* \;
-"""
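+    # shell one-liner: for every PCI network device, dump its ifindex, MAC
+    # address, operstate, vendor/device ids and the bound driver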
+    FIND_NETDEVICE_STRING = (
+        r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
+        $1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
+        $1/device/subsystem_vendor $1/device/subsystem_device ; \
+        printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
+        ' sh  \{\}/* \;
+        """)
+
     BASE_ADAPTER_RE = re.compile(
         '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
 
@@ -484,13 +461,14 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         :param vnf_model_id: parsed vnfd model ID field
         :return: subclass of GenericVNF
         """
-        import_modules_from_package(
+        utils.import_modules_from_package(
             "yardstick.network_services.vnf_generic.vnf")
         expected_name = vnf_model_id
         classes_found = []
 
         def impl():
-            for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
+            for name, class_ in ((c.__name__, c) for c in
+                                 utils.itersubclasses(GenericVNF)):
                 if name == expected_name:
                     yield class_
                 classes_found.append(name)
@@ -552,7 +530,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
                 LOG.debug("no model for %s, skipping", node_name)
                 continue
             file_path = scenario_cfg['task_path']
-            with open_relative_file(file_name, file_path) as stream:
+            with utils.open_relative_file(file_name, file_path) as stream:
                 vnf_model = stream.read()
             vnfd = vnfdgen.generate_vnfd(vnf_model, node)
             # TODO: here add extra context_cfg["nodes"] regardless of template
@@ -596,7 +574,8 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
                 vnf.instantiate(self.scenario_cfg, self.context_cfg)
                 LOG.info("Waiting for %s to instantiate", vnf.name)
                 vnf.wait_for_instantiate()
-        except RuntimeError:
+        except Exception:
+            LOG.exception("Exception during instantiate")
             for vnf in self.vnfs:
                 vnf.terminate()
             raise
@@ -607,7 +586,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
             traffic_gen.listen_traffic(self.traffic_profile)
 
         # register collector with yardstick for KPI collection.
-        self.collector = Collector(self.vnfs, self.traffic_profile)
+        self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
         self.collector.start()
 
         # Start the actual traffic
@@ -623,11 +602,11 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         :return: None
         """
 
-        for vnf in self.vnfs:
-            # Result example:
-            # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
-            LOG.debug("collect KPI for %s", vnf.name)
-            result.update(self.collector.get_kpi(vnf))
+        # this is the only method that is checked by the runner,
+        # so any fatal error must be raised via this method,
+        # otherwise the run will not terminate
+
+        result.update(self.collector.get_kpi())
 
     def teardown(self):
         """ Stop the collector and terminate VNF & TG instance
@@ -635,7 +614,19 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         :return
         """
 
-        self.collector.stop()
-        for vnf in self.vnfs:
-            LOG.info("Stopping %s", vnf.name)
-            vnf.terminate()
+        try:
+            try:
+                self.collector.stop()
+                for vnf in self.vnfs:
+                    LOG.info("Stopping %s", vnf.name)
+                    vnf.terminate()
+                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
+            finally:
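+                # make sure no orphaned child processes survive the teardown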
+                terminate_children()
+        except Exception:
+            # catch any exception in teardown and convert it to a simple exception:
+            # never pass arbitrary exceptions back to multiprocessing, because some
+            # exceptions cannot be pickled
+            # https://bugs.python.org/issue9400
+            LOG.exception("Exception during teardown")
+            raise RuntimeError("Error in teardown")
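
Note on the RuntimeError conversion in teardown(): exceptions cross the
multiprocessing boundary by being pickled in the worker and re-created in
the parent, and exceptions with extra required constructor arguments (or
unpicklable attributes) break that round trip, see
https://bugs.python.org/issue9400. A standalone sketch of the failure mode,
independent of yardstick (class and values are made up):

    import pickle

    class NeedsTwoArgs(Exception):
        # custom exception with a second required __init__ argument
        def __init__(self, msg, detail):
            super(NeedsTwoArgs, self).__init__(msg)
            self.detail = detail

    exc = NeedsTwoArgs("boom", "some detail")
    data = pickle.dumps(exc)   # pickling succeeds: (class, self.args) and the
                               # instance __dict__ are stored
    pickle.loads(data)         # TypeError: unpickling replays
                               # NeedsTwoArgs("boom") without 'detail'

Converting any teardown failure to a plain RuntimeError guarantees the parent
process always receives an exception it can reconstruct.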