X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=yardstick%2Fbenchmark%2Fscenarios%2Fnetworking%2Fvnf_generic.py;h=78f866e25f66b1722a085480b11845b823d22233;hb=d984fd894a50d1aebb4608c0efc87a549f003215;hp=0fab45480a82737b01ed9cfb9698d776009cb840;hpb=e49f9eff83fe3a610fc95aee197938ac3158a0b7;p=yardstick.git diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py index 0fab45480..78f866e25 100644 --- a/yardstick/benchmark/scenarios/networking/vnf_generic.py +++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py @@ -11,115 +11,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" NSPerf specific scenario definition """ - -from __future__ import absolute_import +import copy import logging -import errno +import time import ipaddress - -import copy +from itertools import chain import os import sys -import re -from itertools import chain import six import yaml -from collections import defaultdict -from yardstick.benchmark.scenarios import base +from yardstick.benchmark.scenarios import base as scenario_base +from yardstick.error import IncorrectConfig from yardstick.common.constants import LOG_DIR from yardstick.common.process import terminate_children -from yardstick.common.utils import import_modules_from_package, itersubclasses -from yardstick.common.yaml_loader import yaml_load +from yardstick.common import utils from yardstick.network_services.collector.subscriber import Collector from yardstick.network_services.vnf_generic import vnfdgen from yardstick.network_services.vnf_generic.vnf.base import GenericVNF -from yardstick.network_services.traffic_profile.base import TrafficProfile +from yardstick.network_services import traffic_profile +from yardstick.network_services.traffic_profile import base as tprofile_base from yardstick.network_services.utils import get_nsb_option from yardstick import ssh +traffic_profile.register_modules() -LOG = logging.getLogger(__name__) - - -class SSHError(Exception): - """Class handles ssh connection error exception""" - pass - - -class SSHTimeout(SSHError): - """Class handles ssh connection timeout exception""" - pass - - -class IncorrectConfig(Exception): - """Class handles incorrect configuration during setup""" - pass - - -class IncorrectSetup(Exception): - """Class handles incorrect setup during setup""" - pass - - -class SshManager(object): - def __init__(self, node, timeout=120): - super(SshManager, self).__init__() - self.node = node - self.conn = None - self.timeout = timeout - def __enter__(self): - """ - args -> network device mappings - returns -> ssh connection ready to be used - """ - try: - self.conn = ssh.SSH.from_node(self.node) - self.conn.wait(timeout=self.timeout) - except SSHError as error: - LOG.info("connect failed to %s, due to %s", self.node["ip"], error) - # self.conn defaults to None - return self.conn - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.conn: - self.conn.close() - - -def find_relative_file(path, task_path): - """ - Find file in one of places: in abs of path or - relative to TC scenario file. In this order. 
- - :param path: - :param task_path: - :return str: full path to file - """ - # fixme: create schema to validate all fields have been provided - for lookup in [os.path.abspath(path), os.path.join(task_path, path)]: - try: - with open(lookup): - return lookup - except IOError: - pass - raise IOError(errno.ENOENT, 'Unable to find {} file'.format(path)) - - -def open_relative_file(path, task_path): - try: - return open(path) - except IOError as e: - if e.errno == errno.ENOENT: - return open(os.path.join(task_path, path)) - raise +LOG = logging.getLogger(__name__) -class NetworkServiceTestCase(base.Scenario): +class NetworkServiceTestCase(scenario_base.Scenario): """Class handles Generic framework to do pre-deployment VNF & Network service testing """ @@ -130,16 +54,12 @@ class NetworkServiceTestCase(base.Scenario): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - # fixme: create schema to validate all fields have been provided - with open_relative_file(scenario_cfg["topology"], - scenario_cfg['task_path']) as stream: - topology_yaml = yaml_load(stream) - - self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0] + self._render_topology() self.vnfs = [] self.collector = None self.traffic_profile = None self.node_netdevs = {} + self.bin_path = get_nsb_option('bin_path', '') def _get_ip_flow_range(self, ip_start_range): @@ -190,6 +110,12 @@ class NetworkServiceTestCase(base.Scenario): for index, publicip in enumerate(fflow.get("public_ip", [])): flow["public_ip_{}".format(index)] = publicip + for index, src_port in enumerate(fflow.get("src_port", [])): + flow["src_port_{}".format(index)] = src_port + + for index, dst_port in enumerate(fflow.get("dst_port", [])): + flow["dst_port_{}".format(index)] = dst_port + flow["count"] = fflow["count"] except KeyError: flow = {} @@ -205,37 +131,47 @@ class NetworkServiceTestCase(base.Scenario): def _get_traffic_profile(self): profile = self.scenario_cfg["traffic_profile"] path = self.scenario_cfg["task_path"] - with open_relative_file(profile, path) as infile: + with utils.open_relative_file(profile, path) as infile: + return infile.read() + + def _get_topology(self): + topology = self.scenario_cfg["topology"] + path = self.scenario_cfg["task_path"] + with utils.open_relative_file(topology, path) as infile: return infile.read() def _fill_traffic_profile(self): - traffic_mapping = self._get_traffic_profile() - traffic_map_data = { + tprofile = self._get_traffic_profile() + extra_args = self.scenario_cfg.get('extra_args', {}) + tprofile_data = { 'flow': self._get_traffic_flow(), 'imix': self._get_traffic_imix(), - TrafficProfile.UPLINK: {}, - TrafficProfile.DOWNLINK: {}, + tprofile_base.TrafficProfile.UPLINK: {}, + tprofile_base.TrafficProfile.DOWNLINK: {}, + 'extra_args': extra_args } - traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data) - self.traffic_profile = TrafficProfile.get(traffic_vnfd) - return self.traffic_profile + traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data) + self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd) + + def _render_topology(self): + topology = self._get_topology() + topology_args = self.scenario_cfg.get('extra_args', {}) + topolgy_data = { + 'extra_args': topology_args + } + topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data) + self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0] def _find_vnf_name_from_id(self, vnf_id): return next((vnfd["vnfd-id-ref"] for vnfd in self.topology["constituent-vnfd"] if vnf_id == vnfd["member-vnf-index"]), None) - 
@staticmethod - def get_vld_networks(networks): - # network name is vld_id - vld_map = {} - for name, n in networks.items(): - try: - vld_map[n['vld_id']] = n - except KeyError: - vld_map[name] = n - return vld_map + def _find_vnfd_from_vnf_idx(self, vnf_id): + return next((vnfd + for vnfd in self.topology["constituent-vnfd"] + if vnf_id == vnfd["member-vnf-index"]), None) @staticmethod def find_node_if(nodes, name, if_name, vld_id): @@ -287,7 +223,9 @@ class NetworkServiceTestCase(base.Scenario): node1_if["peer_ifname"] = node0_if_name # just load the network - vld_networks = self.get_vld_networks(self.context_cfg["networks"]) + vld_networks = {n.get('vld_id', name): n for name, n in + self.context_cfg["networks"].items()} + node0_if["network"] = vld_networks.get(vld["id"], {}) node1_if["network"] = vld_networks.get(vld["id"], {}) @@ -326,10 +264,6 @@ class NetworkServiceTestCase(base.Scenario): node0_if["peer_intf"] = node1_copy node1_if["peer_intf"] = node0_copy - def _find_vnfd_from_vnf_idx(self, vnf_idx): - return next((vnfd for vnfd in self.topology["constituent-vnfd"] - if vnf_idx == vnfd["member-vnf-index"]), None) - def _update_context_with_topology(self): for vnfd in self.topology["constituent-vnfd"]: vnf_idx = vnfd["member-vnf-index"] @@ -337,43 +271,6 @@ class NetworkServiceTestCase(base.Scenario): vnfd = self._find_vnfd_from_vnf_idx(vnf_idx) self.context_cfg["nodes"][vnf_name].update(vnfd) - def _probe_netdevs(self, node, node_dict, timeout=120): - try: - return self.node_netdevs[node] - except KeyError: - pass - - netdevs = {} - cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show" - - with SshManager(node_dict, timeout=timeout) as conn: - if conn: - exit_status = conn.execute(cmd)[0] - if exit_status != 0: - raise IncorrectSetup("Node's %s lacks ip tool." % node) - exit_status, stdout, _ = conn.execute( - self.FIND_NETDEVICE_STRING) - if exit_status != 0: - raise IncorrectSetup( - "Cannot find netdev info in sysfs" % node) - netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout) - - self.node_netdevs[node] = netdevs - return netdevs - - @classmethod - def _probe_missing_values(cls, netdevs, network): - - mac_lower = network['local_mac'].lower() - for netdev in netdevs.values(): - if netdev['address'].lower() != mac_lower: - continue - network.update({ - 'driver': netdev['driver'], - 'vpci': netdev['pci_bus_id'], - 'ifindex': netdev['ifindex'], - }) - def _generate_pod_yaml(self): context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id'])) # convert OrderedDict to a list @@ -399,82 +296,16 @@ class NetworkServiceTestCase(base.Scenario): pass return new_node - TOPOLOGY_REQUIRED_KEYS = frozenset({ - "vpci", "local_ip", "netmask", "local_mac", "driver"}) - def map_topology_to_infrastructure(self): """ This method should verify if the available resources defined in pod.yaml match the topology.yaml file. :return: None. 
Side effect: context_cfg is updated """ - num_nodes = len(self.context_cfg["nodes"]) - # OpenStack instance creation time is probably proportional to the number - # of instances - timeout = 120 * num_nodes - for node, node_dict in self.context_cfg["nodes"].items(): - - for network in node_dict["interfaces"].values(): - missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network) - if not missing: - continue - - # only ssh probe if there are missing values - # ssh probe won't work on Ixia, so we had better define all our values - try: - netdevs = self._probe_netdevs(node, node_dict, timeout=timeout) - except (SSHError, SSHTimeout): - raise IncorrectConfig( - "Unable to probe missing interface fields '%s', on node %s " - "SSH Error" % (', '.join(missing), node)) - try: - self._probe_missing_values(netdevs, network) - except KeyError: - pass - else: - missing = self.TOPOLOGY_REQUIRED_KEYS.difference( - network) - if missing: - raise IncorrectConfig( - "Require interface fields '%s' not found, topology file " - "corrupted" % ', '.join(missing)) - - # we have to generate pod.yaml here so we have vpci and driver - self._generate_pod_yaml() # 3. Use topology file to find connections & resolve dest address self._resolve_topology() self._update_context_with_topology() - FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \ -$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \ -$1/device/subsystem_vendor $1/device/subsystem_device ; \ -printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ -' sh \{\}/* \; -""" - BASE_ADAPTER_RE = re.compile( - '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M) - - @classmethod - def parse_netdev_info(cls, stdout): - network_devices = defaultdict(dict) - matches = cls.BASE_ADAPTER_RE.findall(stdout) - for bus_path, interface_name, name, value in matches: - dirname, bus_id = os.path.split(bus_path) - if 'virtio' in bus_id: - # for some stupid reason VMs include virtio1/ - # in PCI device path - bus_id = os.path.basename(dirname) - # remove extra 'device/' from 'device/vendor, - # device/subsystem_vendor', etc. 
- if 'device/' in name: - name = name.split('/')[1] - network_devices[interface_name][name] = value - network_devices[interface_name][ - 'interface_name'] = interface_name - network_devices[interface_name]['pci_bus_id'] = bus_id - # convert back to regular dict - return dict(network_devices) - @classmethod def get_vnf_impl(cls, vnf_model_id): """ Find the implementing class from vnf_model["vnf"]["name"] field @@ -482,13 +313,14 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ :param vnf_model_id: parsed vnfd model ID field :return: subclass of GenericVNF """ - import_modules_from_package( + utils.import_modules_from_package( "yardstick.network_services.vnf_generic.vnf") expected_name = vnf_model_id classes_found = [] def impl(): - for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)): + for name, class_ in ((c.__name__, c) for c in + utils.itersubclasses(GenericVNF)): if name == expected_name: yield class_ classes_found.append(name) @@ -541,7 +373,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ context_cfg = self.context_cfg vnfs = [] - # we assume OrderedDict for consistenct in instantiation + # we assume OrderedDict for consistency in instantiation for node_name, node in context_cfg["nodes"].items(): LOG.debug(node) try: @@ -550,7 +382,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ LOG.debug("no model for %s, skipping", node_name) continue file_path = scenario_cfg['task_path'] - with open_relative_file(file_name, file_path) as stream: + with utils.open_relative_file(file_name, file_path) as stream: vnf_model = stream.read() vnfd = vnfdgen.generate_vnfd(vnf_model, node) # TODO: here add extra context_cfg["nodes"] regardless of template @@ -600,13 +432,16 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ vnf.terminate() raise + # we have to generate pod.yaml here after VNF has probed so we know vpci and driver + self._generate_pod_yaml() + # 3. Run experiment # Start listeners first to avoid losing packets for traffic_gen in traffic_runners: traffic_gen.listen_traffic(self.traffic_profile) # register collector with yardstick for KPI collection. - self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile) + self.collector = Collector(self.vnfs) self.collector.start() # Start the actual traffic @@ -650,3 +485,11 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ # https://bugs.python.org/issue9400 LOG.exception("") raise RuntimeError("Error in teardown") + + def pre_run_wait_time(self, time_seconds): + """Time waited before executing the run method""" + time.sleep(time_seconds) + + def post_run_wait_time(self, time_seconds): + """Time waited after executing the run method""" + pass
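
Note: this change drops the module-local file-lookup helpers (find_relative_file / open_relative_file) in favour of the shared yardstick.common.utils equivalents, which the new _get_traffic_profile(), _get_topology() and load_vnf_models() code calls as utils.open_relative_file(name, task_path). As a minimal sketch of the assumed lookup behaviour, the helper below simply mirrors the open_relative_file removed by this diff (open the path as given, then fall back to task_path/path on ENOENT); the actual implementation in yardstick.common.utils may differ in detail.

    # Sketch only: mirrors the removed local helper; assumed to match the
    # behaviour of yardstick.common.utils.open_relative_file.
    import errno
    import os

    def open_relative_file(path, task_path):
        """Open 'path' as given; on ENOENT retry relative to task_path."""
        try:
            return open(path)
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # fall back to the scenario/task directory
            return open(os.path.join(task_path, path))

In the new code both the traffic profile and the topology template are read this way, with scenario_cfg["task_path"] as the fallback directory, before being rendered by vnfdgen.generate_vnfd() together with any scenario_cfg["extra_args"].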