Merge "Enable IP_ROUTING for netperf UDP_STREAM test"
[yardstick.git] / yardstick / benchmark / scenarios / networking / vnf_generic.py
index 905f1f4..450f83f 100644 (file)
@@ -25,9 +25,11 @@ import re
 from itertools import chain
 
 import six
+import yaml
 from collections import defaultdict
 
 from yardstick.benchmark.scenarios import base
+from yardstick.common.constants import LOG_DIR
 from yardstick.common.utils import import_modules_from_package, itersubclasses
 from yardstick.common.yaml_loader import yaml_load
 from yardstick.network_services.collector.subscriber import Collector
@@ -365,6 +367,36 @@ class NetworkServiceTestCase(base.Scenario):
                 'ifindex': netdev['ifindex'],
             })
 
+    def _generate_pod_yaml(self):
+        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
+        # pod.yaml expects 'nodes' to be a list, so convert the
+        # OrderedDict of context nodes into one
+        nodes = []
+        for node in self.context_cfg["nodes"].values():
+            # the 'name' field is required; strip the context suffix from it
+            node['name'] = node['name'].split('.')[0]
+            nodes.append(node)
+        nodes = self._convert_pkeys_to_string(nodes)
+        pod_dict = {
+            "nodes": nodes,
+            "networks": self.context_cfg["networks"]
+        }
+        with open(context_yaml, "w") as context_out:
+            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
+                           explicit_start=True)
+
+    @staticmethod
+    def _convert_pkeys_to_string(nodes):
+        # copy the list because we replace entries in it
+        nodes = nodes[:]
+        for i, node in enumerate(nodes):
+            try:
+                nodes[i] = dict(node, pkey=ssh.convert_key_to_str(node["pkey"]))
+            except KeyError:
+                pass
+        return nodes
+
     TOPOLOGY_REQUIRED_KEYS = frozenset({
         "vpci", "local_ip", "netmask", "local_mac", "driver"})
 
@@ -405,6 +437,8 @@ class NetworkServiceTestCase(base.Scenario):
                         "Require interface fields '%s' not found, topology file "
                         "corrupted" % ', '.join(missing))
 
+        # we have to generate pod.yaml here, once vpci and driver are known
+        self._generate_pod_yaml()
         # 3. Use topology file to find connections & resolve dest address
         self._resolve_topology()
         self._update_context_with_topology()
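
A quick way to sanity-check the file generated during this step is to load it back. The sketch
below uses yaml.safe_load directly and a hypothetical task id; the real path depends on LOG_DIR
and the id of the running task.

    import os
    import yaml

    log_dir = "/tmp/yardstick"          # assumed LOG_DIR
    task_id = "0123-example-task"       # hypothetical task id

    with open(os.path.join(log_dir, "pod-{}.yaml".format(task_id))) as pod_file:
        pod = yaml.safe_load(pod_file)

    assert isinstance(pod["nodes"], list)       # pod.yaml nodes must be a list
    print([n["name"] for n in pod["nodes"]])    # context suffixes were stripped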
@@ -470,6 +504,10 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         ext_intfs = vnfd["vdu"][0]["external-interface"] = []
         # have to sort so xe0 goes first
         for intf_name, intf in sorted(node['interfaces'].items()):
+            # only interfaces with a vld_id are added.
+            # Thus there are two layers of filtering: only interfaces with a
+            # vld_id show up in 'interfaces', and only interfaces with traffic
+            # profiles are used by the traffic generators
             if intf.get('vld_id'):
+                # force dpdk_port_num to int so we can do reverse lookup
                 try:
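
The comment above describes two layers of filtering; the toy sketch below shows the first one,
keeping only interfaces that carry a vld_id. The interface names and vld_id values are made up
for illustration.

    interfaces = {
        "xe0": {"vld_id": "uplink_0", "vpci": "0000:00:04.0"},
        "xe1": {"vld_id": "downlink_0", "vpci": "0000:00:05.0"},
        "eth0": {"vpci": "0000:00:03.0"},   # management port, no vld_id
    }

    # layer 1: only interfaces with a vld_id become external-interfaces
    ext_intfs = {name: intf for name, intf in sorted(interfaces.items())
                 if intf.get("vld_id")}
    print(list(ext_intfs))   # ['xe0', 'xe1'] -- eth0 is dropped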
@@ -511,6 +549,13 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
             vnfd = vnfdgen.generate_vnfd(vnf_model, node)
             # TODO: here add extra context_cfg["nodes"] regardless of template
             vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
+            # force-inject the pkey if the node has one;
+            # we standardize on Heat passing the pkey as a string so we don't
+            # rely on the filesystem
+            try:
+                vnfd['mgmt-interface']['pkey'] = node['pkey']
+            except KeyError:
+                pass
             self.create_interfaces_from_node(vnfd, node)
             vnf_impl = self.get_vnf_impl(vnfd['id'])
             vnf_instance = vnf_impl(node_name, vnfd)
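
The pkey injection above assumes the Heat context puts a key object (or string) into
node['pkey'], which ssh.convert_key_to_str later serializes in _convert_pkeys_to_string. The
sketch below shows one way such a conversion could look using paramiko; it is an assumption
about the helper's behaviour, not its actual implementation.

    import io
    import paramiko

    def key_to_str(pkey):
        # serialize a paramiko key object to a PEM string; plain strings pass through
        if isinstance(pkey, paramiko.pkey.PKey):
            buf = io.StringIO()
            pkey.write_private_key(buf)
            return buf.getvalue()
        return pkey

    key = paramiko.RSAKey.generate(2048)   # throwaway key for illustration
    node = {"name": "vnf__0", "pkey": key}
    node["pkey"] = key_to_str(node["pkey"])
    assert node["pkey"].startswith("-----BEGIN")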