X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=yardstick%2Fnetwork_services%2Fvnf_generic%2Fvnf%2Fprox_helpers.py;h=29f9c7bba43b89e0a1710d235141792994c101ea;hb=de8ce9889cfc9e9d62e26e53b5f27b2f4cd9ff06;hp=d24710132e5397186fe50464da6ed07c01f2c74d;hpb=36a2e04b468d980c98bd43768e4c88d37a8a69f1;p=yardstick.git diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py index d24710132..29f9c7bba 100644 --- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py +++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py @@ -11,34 +11,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import import array -import operator -import logging import io +import logging +import operator import os import re import select import socket - -from collections import OrderedDict, namedtuple import time +from collections import OrderedDict, namedtuple from contextlib import contextmanager from itertools import repeat, chain +from multiprocessing import Queue import six -from multiprocessing import Queue -from six.moves import zip, StringIO from six.moves import cStringIO +from six.moves import zip, StringIO -from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file -from yardstick.common.utils import SocketTopology, ip_to_hex, join_non_strings, try_int -from yardstick.network_services.vnf_generic.vnf.iniparser import ConfigParser +from yardstick.common import utils +from yardstick.common.utils import SocketTopology, join_non_strings, try_int +from yardstick.network_services.helpers.iniparser import ConfigParser from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper - PROX_PORT = 8474 SECTION_NAME = 0 @@ -82,7 +79,6 @@ CONFIGURATION_OPTIONS = ( class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')): - CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$") def __new__(cls, *args): @@ -115,7 +111,6 @@ class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread') class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')): - def __new__(cls, *args): try: assert args[0] is not str(args[0]) @@ -129,7 +124,6 @@ class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')): class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,' 'delta_tx,delta_tsc,' 'latency,rx_total,tx_total,pps')): - @property def pkt_loss(self): try: @@ -191,7 +185,6 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_ class PacketDump(object): - @staticmethod def assert_func(func, value1, value2, template=None): assert func(value1, value2), template.format(value1, value2) @@ -268,6 +261,7 @@ class ProxSocketHelper(object): self._sock = sock self._pkt_dumps = [] + self.master_stats = None def connect(self, ip, port): """Connect to the prox instance on the remote system""" @@ -323,6 +317,7 @@ class ProxSocketHelper(object): def get_data(self, pkt_dump_only=False, timeout=1): """ read data from the socket """ + # This method behaves slightly differently depending on whether it is # called to read the response to a command (pkt_dump_only = 0) or if # it is called specifically to read a packet dump (pkt_dump_only = 1). 
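# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's illustration, not part of the diff): the
# comment added in the hunk above distinguishes get_data() reading an ordinary
# command response (pkt_dump_only=False) from reading a packet dump
# (pkt_dump_only=True). A minimal, assumed flow against a running PROX
# instance follows; the 10.0.0.1 management IP and the core/task/count values
# are hypothetical placeholders.
from yardstick.network_services.vnf_generic.vnf.prox_helpers import (
    PROX_PORT, ProxSocketHelper)

sut = ProxSocketHelper()              # builds its own TCP socket, as _connect() does
sut.connect("10.0.0.1", PROX_PORT)    # PROX control port, 8474 by default

# Command round trip: the same "speed" command string that set_speed() builds.
sut.put_command("speed 1 0 25.0\n")
response = sut.get_data(pkt_dump_only=False, timeout=1)  # drain any reply text

# Packet-dump flow: request a dump, then consume only the queued "pktdump" data.
sut.dump_rx(1, 0, 8)                  # core 1, task 0, 8 packets
sut.get_data(pkt_dump_only=True)
packet_dump = sut.get_packet_dump()   # next queued PacketDump, if any
# ---------------------------------------------------------------------------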
@@ -366,8 +361,9 @@ class ProxSocketHelper(object): """ send data to the remote instance """ LOG.debug("Sending data to socket: [%s]", to_send.rstrip('\n')) try: + # NOTE: sendall will block, we need a timeout self._sock.sendall(to_send.encode('utf-8')) - except: + except: # pylint: disable=bare-except pass def get_packet_dump(self): @@ -434,10 +430,15 @@ class ProxSocketHelper(object): LOG.debug("Set value for core(s) %s", cores) self._run_template_over_cores("reset values {} 0\n", cores) - def set_speed(self, cores, speed): + def set_speed(self, cores, speed, tasks=None): """ set speed on the remote instance """ - LOG.debug("Set speed for core(s) %s to %g", cores, speed) - self._run_template_over_cores("speed {} 0 {}\n", cores, speed) + if tasks is None: + tasks = [0] * len(cores) + elif len(tasks) != len(cores): + LOG.error("set_speed: cores and tasks must have the same len") + LOG.debug("Set speed for core(s)/tasks(s) %s to %g", list(zip(cores, tasks)), speed) + for (core, task) in list(zip(cores, tasks)): + self.put_command("speed {} {} {}\n".format(core, task, speed)) def slope_speed(self, cores_speed, duration, n_steps=0): """will start to increase speed from 0 to N where N is taken from @@ -506,11 +507,6 @@ class ProxSocketHelper(object): def hz(self): return self.get_all_tot_stats()[3] - # Deprecated - # TODO: remove - def rx_stats(self, cores, task=0): - return self.core_stats(cores, task) - def core_stats(self, cores, task=0): """Get the receive statistics from the remote system""" rx = tx = drop = tsc = 0 @@ -541,7 +537,7 @@ class ProxSocketHelper(object): finally: container['end_tot'] = end = self.get_all_tot_stats() - container['delta'] = TotStatsTuple(end - start for start, end in zip(start, end)) + container['delta'] = TotStatsTuple(e - s for s, e in zip(start, end)) def tot_stats(self): """Get the total statistics from the remote system""" @@ -564,7 +560,7 @@ class ProxSocketHelper(object): """Activate dump on rx on the specified core""" LOG.debug("Activating dump on RX for core %d, task %d, count %d", core_id, task_id, count) self.put_command("dump_rx {} {} {}\n".format(core_id, task_id, count)) - time.sleep(1.5) # Give PROX time to set up packet dumping + time.sleep(1.5) # Give PROX time to set up packet dumping def quit(self): self.stop_all() @@ -584,9 +580,14 @@ class ProxSocketHelper(object): time.sleep(3) +_LOCAL_OBJECT = object() + + class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): # the actual app is lowercase APP_NAME = 'prox' + # not used for Prox but added for consistency + VNF_TYPE = "PROX" LUA_PARAMETER_NAME = "" LUA_PARAMETER_PEER = { @@ -594,6 +595,8 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): "sut": "gen", } + CONFIG_QUEUE_TIMEOUT = 120 + def __init__(self, vnfd_helper, ssh_helper, scenario_helper): self.remote_path = None super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper) @@ -601,13 +604,36 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): self._prox_config_data = None self.additional_files = {} self.config_queue = Queue() + # allow_exit_without_flush + self.config_queue.cancel_join_thread() + self._global_section = None - def _build_pipeline_kwargs(self): - tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME) - self.pipeline_kwargs = { - 'tool_path': tool_path, - 'tool_dir': os.path.dirname(tool_path), - } + @property + def prox_config_data(self): + if self._prox_config_data is None: + # this will block, but it needs too + self._prox_config_data = 
self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT) + return self._prox_config_data + + @property + def global_section(self): + if self._global_section is None and self.prox_config_data: + self._global_section = self.find_section("global") + return self._global_section + + def find_section(self, name, default=_LOCAL_OBJECT): + result = next((value for key, value in self.prox_config_data if key == name), default) + if result is _LOCAL_OBJECT: + raise KeyError('{} not found in Prox config'.format(name)) + return result + + def find_in_section(self, section_name, section_key, default=_LOCAL_OBJECT): + section = self.find_section(section_name, []) + result = next((value for key, value in section if key == section_key), default) + if result is _LOCAL_OBJECT: + template = '{} not found in {} section of Prox config' + raise KeyError(template.format(section_key, section_name)) + return result def copy_to_target(self, config_file_path, prox_file): remote_path = os.path.join("/tmp", prox_file) @@ -650,14 +676,13 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): if port_section_name != section_name: continue - for index, section_data in enumerate(section): + for section_data in section: if section_data[0] == "mac": section_data[1] = "hardware" # search for dst mac for _, section in sections: - # for index, (item_key, item_val) in enumerate(section): - for index, section_data in enumerate(section): + for section_data in section: item_key, item_val = section_data if item_val.startswith("@@dst_mac"): tx_port_iter = re.finditer(r'\d+', item_val) @@ -678,18 +703,38 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): return sections for section_name, section in sections: - for index, section_data in enumerate(section): + for section_data in section: try: if section_data[0].startswith("dofile"): section_data[0] = self._insert_additional_file(section_data[0]) if section_data[1].startswith("dofile"): section_data[1] = self._insert_additional_file(section_data[1]) - except: + except: # pylint: disable=bare-except pass return sections + @staticmethod + def write_prox_lua(lua_config): + """ + Write an .ini-format config file for PROX (parameters.lua) + PROX does not allow a space before/after the =, so we need + a custom method + """ + out = [] + for key in lua_config: + value = '"' + lua_config[key] + '"' + if key == "__name__": + continue + if value is not None and value != '@': + key = "=".join((key, str(value).replace('\n', '\n\t'))) + out.append(key) + else: + key = str(key).replace('\n', '\n\t') + out.append(key) + return os.linesep.join(out) + @staticmethod def write_prox_config(prox_config): """ @@ -698,9 +743,9 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): a custom method """ out = [] - for i, (section_name, section) in enumerate(prox_config): + for (section_name, section) in prox_config: out.append("[{}]".format(section_name)) - for index, item in enumerate(section): + for item in section: key, value = item if key == "__name__": continue @@ -720,69 +765,56 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): def generate_prox_lua_file(self): p = OrderedDict() all_ports = self.vnfd_helper.port_pairs.all_ports - lua_param = self.LUA_PARAMETER_NAME for port_name in all_ports: - peer = self.LUA_PARAMETER_PEER[lua_param] port_num = self.vnfd_helper.port_num(port_name) intf = self.vnfd_helper.find_interface(name=port_name) vintf = intf['virtual-interface'] - local_ip = vintf["local_ip"] - dst_ip = vintf["dst_ip"] - local_ip_hex = ip_to_hex(local_ip, separator=' ') - 
dst_ip_hex = ip_to_hex(dst_ip, separator=' ') - p.update([ - ("{}_hex_ip_port_{}".format(lua_param, port_num), local_ip_hex), - ("{}_ip_port_{}".format(lua_param, port_num), local_ip), - ("{}_hex_ip_port_{}".format(peer, port_num), dst_ip_hex), - ("{}_ip_port_{}".format(peer, port_num), dst_ip), - ]) - lua = os.linesep.join(('{}:"{}"'.format(k, v) for k, v in p.items())) - return lua - - def upload_prox_lua(self, config_dir, prox_config_dict): - # we could have multiple lua directives - lau_dict = prox_config_dict.get('lua', {}) - find_iter = (re.findall(r'\("([^"]+)"\)', k) for k in lau_dict) - lua_file = next((found[0] for found in find_iter if found), None) - if not lua_file: - return "" - - out = self.generate_prox_lua_file() - remote_path = os.path.join(config_dir, lua_file) - return self.put_string_to_file(out, remote_path) - - def upload_prox_config(self, config_file, prox_config_dict): + p["tester_mac{0}".format(port_num)] = vintf["dst_mac"] + p["src_mac{0}".format(port_num)] = vintf["local_mac"] + + return p + + def upload_prox_lua(self, config_file, lua_data): # prox can't handle spaces around ' = ' so use custom method - out = StringIO(self.write_prox_config(prox_config_dict)) + out = StringIO(self.write_prox_lua(lua_data)) out.seek(0) remote_path = os.path.join("/tmp", config_file) self.ssh_helper.put_file_obj(out, remote_path) return remote_path - CONFIG_QUEUE_TIMEOUT = 120 + def upload_prox_config(self, config_file, prox_config_data): + # prox can't handle spaces around ' = ' so use custom method + out = StringIO(self.write_prox_config(prox_config_data)) + out.seek(0) + remote_path = os.path.join("/tmp", config_file) + self.ssh_helper.put_file_obj(out, remote_path) - @property - def prox_config_data(self): - if self._prox_config_data is None: - # this will block, but it needs too - self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT) - return self._prox_config_data + return remote_path def build_config_file(self): task_path = self.scenario_helper.task_path options = self.scenario_helper.options config_path = options['prox_config'] config_file = os.path.basename(config_path) - config_path = find_relative_file(config_path, task_path) + config_path = utils.find_relative_file(config_path, task_path) self.additional_files = {} + try: + if options['prox_generate_parameter']: + self.lua = [] + self.lua = self.generate_prox_lua_file() + if len(self.lua) > 0: + self.upload_prox_lua("parameters.lua", self.lua) + except: # pylint: disable=bare-except + pass + prox_files = options.get('prox_files', []) if isinstance(prox_files, six.string_types): prox_files = [prox_files] for key_prox_file in prox_files: base_prox_file = os.path.basename(key_prox_file) - key_prox_path = find_relative_file(key_prox_file, task_path) + key_prox_path = utils.find_relative_file(key_prox_file, task_path) remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file) self.additional_files[base_prox_file] = remote_prox_file @@ -795,36 +827,31 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): self.build_config_file() options = self.scenario_helper.options - prox_args = options['prox_args'] - LOG.info("Provision and start the %s", self.APP_NAME) - self._build_pipeline_kwargs() - self.pipeline_kwargs["args"] = " ".join( - " ".join([k, v if v else ""]) for k, v in prox_args.items()) - self.pipeline_kwargs["cfg_file"] = self.remote_path + tool_path = self.ssh_helper.join_bin_path(self.APP_NAME) - cmd_template = "sudo bash -c 'cd {tool_dir}; {tool_path} -o cli {args} -f 
{cfg_file} '" - prox_cmd = cmd_template.format(**self.pipeline_kwargs) - return prox_cmd + self.pipeline_kwargs = { + 'tool_path': tool_path, + 'tool_dir': os.path.dirname(tool_path), + 'cfg_file': self.remote_path, + 'args': ' '.join(' '.join([str(k), str(v) if v else '']) + for k, v in prox_args.items()) + } + + cmd_template = ("sudo bash -c 'cd {tool_dir}; {tool_path} -o cli " + "{args} -f {cfg_file} '") + return cmd_template.format(**self.pipeline_kwargs) # this might be bad, sometimes we want regular ResourceHelper methods, like collect_kpi class ProxResourceHelper(ClientResourceHelper): RESOURCE_WORD = 'prox' - PROX_CORE_GEN_MODE = "gen" - PROX_CORE_LAT_MODE = "lat" - PROX_CORE_MPLS_TEST = "MPLS tag/untag" PROX_MODE = "" WAIT_TIME = 3 - @staticmethod - def line_rate_to_pps(pkt_size, n_ports): - # FIXME Don't hardcode 10Gb/s - return n_ports * TEN_GIGABIT / BITS_PER_BYTE / (pkt_size + 20) - @staticmethod def find_pci(pci, bound_pci): # we have to substring match PCI bus address from the end @@ -837,16 +864,14 @@ class ProxResourceHelper(ClientResourceHelper): self._ip = self.mgmt_interface["ip"] self.done = False - self._cpu_topology = None self._vpci_to_if_name_map = None self.additional_file = {} self.remote_prox_file_name = None self.lower = None self.upper = None - self._test_cores = None - self._latency_cores = None - self._tagged_cores = None - self._plain_cores = None + self.step_delta = 1 + self.step_time = 0.5 + self._test_type = None @property def sut(self): @@ -855,40 +880,13 @@ class ProxResourceHelper(ClientResourceHelper): return self.client @property - def cpu_topology(self): - if not self._cpu_topology: - stdout = io.BytesIO() - self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout) - self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8')) - return self._cpu_topology - - @property - def test_cores(self): - if not self._test_cores: - self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE) - return self._test_cores - - @property - def mpls_cores(self): - if not self._tagged_cores: - self._tagged_cores, self._plain_cores = self.get_cores_mpls(self.PROX_CORE_GEN_MODE) - return self._tagged_cores, self._plain_cores - - @property - def tagged_cores(self): - return self.mpls_cores[0] - - @property - def plain_cores(self): - return self.mpls_cores[1] - - @property - def latency_cores(self): - if not self._latency_cores: - self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE) - return self._latency_cores + def test_type(self): + if self._test_type is None: + self._test_type = self.setup_helper.find_in_section('global', 'name', None) + return self._test_type def run_traffic(self, traffic_profile): + self._queue.cancel_join_thread() self.lower = 0.0 self.upper = 100.0 @@ -930,76 +928,201 @@ class ProxResourceHelper(ClientResourceHelper): func = getattr(self.sut, cmd, None) if func: return func(*args, **kwargs) + return None - @contextmanager - def traffic_context(self, pkt_size, value): - self.sut.stop_all() - self.sut.reset_stats() - if self.get_test_type() == self.PROX_CORE_MPLS_TEST: - self.sut.set_pkt_size(self.tagged_cores, pkt_size) - self.sut.set_pkt_size(self.plain_cores, pkt_size - 4) - self.sut.set_speed(self.tagged_cores, value) - ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20) - self.sut.set_speed(self.plain_cores, value * ratio) - else: - self.sut.set_pkt_size(self.test_cores, pkt_size) - self.sut.set_speed(self.test_cores, value) + def _connect(self, client=None): + """Run and connect to prox on the remote system """ + # 
De-allocating a large amount of hugepages takes some time. If a new + # PROX instance is started immediately after killing the previous one, + # it might not be able to allocate hugepages, because they are still + # being freed. Hence the -w switch. + # self.connection.execute("sudo killall -w Prox 2>/dev/null") + # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t + # -f ./handle_none-4.cfg" + # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir + + # "; " \ + # + "export RTE_TARGET=" + self._dpdk_target + ";" \ + # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50; + # sudo " \ + # + "./build/Prox " + prox_args + # log.debug("Starting PROX with command [%s]", prox_cmd) + # thread.start_new_thread(self.ssh_check_quit, (self, self._user, + # self._ip, prox_cmd)) + if client is None: + client = ProxSocketHelper() - self.sut.start_all() - try: + # try connecting to Prox for 60s + for _ in range(RETRY_SECONDS): + time.sleep(RETRY_INTERVAL) + try: + client.connect(self._ip, PROX_PORT) + except (socket.gaierror, socket.error): + continue + else: + return client + + msg = "Failed to connect to prox, please check if system {} accepts connections on port {}" + raise Exception(msg.format(self._ip, PROX_PORT)) + + +class ProxDataHelper(object): + + def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss): + super(ProxDataHelper, self).__init__() + self.vnfd_helper = vnfd_helper + self.sut = sut + self.pkt_size = pkt_size + self.value = value + self.tolerated_loss = tolerated_loss + self.port_count = len(self.vnfd_helper.port_pairs.all_ports) + self.tsc_hz = None + self.measured_stats = None + self.latency = None + self._totals_and_pps = None + self.result_tuple = None + + @property + def totals_and_pps(self): + if self._totals_and_pps is None: + rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8] + pps = self.value / 100.0 * self.line_rate_to_pps() + self._totals_and_pps = rx_total, tx_total, pps + return self._totals_and_pps + + @property + def rx_total(self): + return self.totals_and_pps[0] + + @property + def tx_total(self): + return self.totals_and_pps[1] + + @property + def pps(self): + return self.totals_and_pps[2] + + @property + def samples(self): + samples = {} + for port_name, port_num in self.vnfd_helper.ports_iter(): + try: + port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8] + samples[port_name] = { + "in_packets": port_rx_total, + "out_packets": port_tx_total, + } + except (KeyError, TypeError, NameError, MemoryError, ValueError, + SystemError, BufferError): + samples[port_name] = { + "in_packets": 0, + "out_packets": 0, + } + return samples + + def __enter__(self): + self.check_interface_count() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.make_tuple() + + def make_tuple(self): + if self.result_tuple: + return + + self.result_tuple = ProxTestDataTuple( + self.tolerated_loss, + self.tsc_hz, + self.measured_stats['delta'].rx, + self.measured_stats['delta'].tx, + self.measured_stats['delta'].tsc, + self.latency, + self.rx_total, + self.tx_total, + self.pps, + ) + self.result_tuple.log_data() + + @contextmanager + def measure_tot_stats(self): + with self.sut.measure_tot_stats() as self.measured_stats: yield - finally: - self.sut.stop_all() - def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + def check_interface_count(self): # do this assert in init? unless we expect interface count to # change from one run to another run... 
- ports = self.vnfd_helper.port_pairs.all_ports - port_count = len(ports) - assert port_count in {1, 2, 4}, \ + assert self.port_count in {1, 2, 4}, \ "Invalid number of ports: 1, 2 or 4 ports only supported at this time" - with self.traffic_context(pkt_size, value): - # Getting statistics to calculate PPS at right speed.... - tsc_hz = float(self.sut.hz()) - time.sleep(2) - with self.sut.measure_tot_stats() as data: - time.sleep(duration) + def capture_tsc_hz(self): + self.tsc_hz = float(self.sut.hz()) - # Get stats before stopping the cores. Stopping cores takes some time - # and might skew results otherwise. - latency = self.get_latency() + def line_rate_to_pps(self): + # NOTE: to fix, don't hardcode 10Gb/s + return self.port_count * TEN_GIGABIT / BITS_PER_BYTE / (self.pkt_size + 20) - deltas = data['delta'] - rx_total, tx_total = self.sut.port_stats(range(port_count))[6:8] - pps = value / 100.0 * self.line_rate_to_pps(pkt_size, port_count) - samples = {} - # we are currently using enumeration to map logical port num to interface - for port_name in ports: - port = self.vnfd_helper.port_num(port_name) - port_rx_total, port_tx_total = self.sut.port_stats([port])[6:8] - samples[port_name] = { - "in_packets": port_rx_total, - "out_packets": port_tx_total, - } - - result = ProxTestDataTuple(tolerated_loss, tsc_hz, deltas.rx, deltas.tx, - deltas.tsc, latency, rx_total, tx_total, pps) - result.log_data() - return result, samples - - def get_test_type(self): - test_type = None - for section_name, section in self.setup_helper.prox_config_data: - if section_name != "global": - continue +class ProxProfileHelper(object): - for key, value in section: - if key == "name" and value == self.PROX_CORE_MPLS_TEST: - test_type = self.PROX_CORE_MPLS_TEST + __prox_profile_type__ = "Generic" + + PROX_CORE_GEN_MODE = "gen" + PROX_CORE_LAT_MODE = "lat" + + @classmethod + def get_cls(cls, helper_type): + """Return class of specified type.""" + if not helper_type: + return ProxProfileHelper + + for profile_helper_class in utils.itersubclasses(cls): + if helper_type == profile_helper_class.__prox_profile_type__: + return profile_helper_class + + return ProxProfileHelper + + @classmethod + def make_profile_helper(cls, resource_helper): + return cls.get_cls(resource_helper.test_type)(resource_helper) + + def __init__(self, resource_helper): + super(ProxProfileHelper, self).__init__() + self.resource_helper = resource_helper + self._cpu_topology = None + self._test_cores = None + self._latency_cores = None + + @property + def cpu_topology(self): + if not self._cpu_topology: + stdout = io.BytesIO() + self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout) + self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8')) + return self._cpu_topology + + @property + def test_cores(self): + if not self._test_cores: + self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE) + return self._test_cores - return test_type + @property + def latency_cores(self): + if not self._latency_cores: + self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE) + return self._latency_cores + + @contextmanager + def traffic_context(self, pkt_size, value): + self.sut.stop_all() + self.sut.reset_stats() + try: + self.sut.set_pkt_size(self.test_cores, pkt_size) + self.sut.set_speed(self.test_cores, value) + self.sut.start_all() + yield + finally: + self.sut.stop_all() def get_cores(self, mode): cores = [] @@ -1011,73 +1134,652 @@ class ProxResourceHelper(ClientResourceHelper): for key, value in section: if key == "mode" 
and value == mode: core_tuple = CoreSocketTuple(section_name) - core = core_tuple.find_in_topology(self.cpu_topology) + core = core_tuple.core_id cores.append(core) return cores - def get_cores_mpls(self, mode=PROX_CORE_GEN_MODE): + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... + data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples + + def get_latency(self): + """ + :return: return lat_min, lat_max, lat_avg + :rtype: list + """ + + if not self._latency_cores: + self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE) + + if self._latency_cores: + return self.sut.lat_stats(self._latency_cores) + return [] + + def terminate(self): + pass + + def __getattr__(self, item): + return getattr(self.resource_helper, item) + + +class ProxMplsProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "MPLS tag/untag" + + def __init__(self, resource_helper): + super(ProxMplsProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + + @property + def mpls_cores(self): + if not self._cores_tuple: + self._cores_tuple = self.get_cores_mpls() + return self._cores_tuple + + @property + def tagged_cores(self): + return self.mpls_cores[0] + + @property + def plain_cores(self): + return self.mpls_cores[1] + + def get_cores_mpls(self): cores_tagged = [] cores_plain = [] - for section_name, section in self.setup_helper.prox_config_data: + for section_name, section in self.resource_helper.setup_helper.prox_config_data: if not section_name.startswith("core"): continue - if all(key != "mode" or value != mode for key, value in section): + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): continue for item_key, item_value in section: - if item_key == "name" and item_value.startswith("tag"): + if item_key != 'name': + continue + + if item_value.startswith("tag"): core_tuple = CoreSocketTuple(section_name) - core_tag = core_tuple.find_in_topology(self.cpu_topology) + core_tag = core_tuple.core_id cores_tagged.append(core_tag) - elif item_key == "name" and item_value.startswith("udp"): + elif item_value.startswith("udp"): core_tuple = CoreSocketTuple(section_name) - core_udp = core_tuple.find_in_topology(self.cpu_topology) + core_udp = core_tuple.core_id cores_plain.append(core_udp) return cores_tagged, cores_plain - def get_latency(self): - """ - :return: return lat_min, lat_max, lat_avg - :rtype: list - """ - if self._latency_cores: - return self.sut.lat_stats(self._latency_cores) - return [] + @contextmanager + def traffic_context(self, pkt_size, value): + self.sut.stop_all() + self.sut.reset_stats() + try: + self.sut.set_pkt_size(self.tagged_cores, pkt_size) + self.sut.set_pkt_size(self.plain_cores, pkt_size - 4) + self.sut.set_speed(self.tagged_cores, value) + ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20) + self.sut.set_speed(self.plain_cores, value * ratio) + self.sut.start_all() + yield + finally: + self.sut.stop_all() - def _connect(self, client=None): - """Run and connect to prox on the remote system """ - # De-allocating a large amount of hugepages takes some time. 
If a new - # PROX instance is started immediately after killing the previous one, - # it might not be able to allocate hugepages, because they are still - # being freed. Hence the -w switch. - # self.connection.execute("sudo killall -w Prox 2>/dev/null") - # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t - # -f ./handle_none-4.cfg" - # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir + - # "; " \ - # + "export RTE_TARGET=" + self._dpdk_target + ";" \ - # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50; - # sudo " \ - # + "./build/Prox " + prox_args - # log.debug("Starting PROX with command [%s]", prox_cmd) - # thread.start_new_thread(self.ssh_check_quit, (self, self._user, - # self._ip, prox_cmd)) - if client is None: - client = ProxSocketHelper() - # try connecting to Prox for 60s - for _ in range(RETRY_SECONDS): - time.sleep(RETRY_INTERVAL) - try: - client.connect(self._ip, PROX_PORT) - except (socket.gaierror, socket.error): +class ProxBngProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "BNG gen" + + def __init__(self, resource_helper): + super(ProxBngProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + + @property + def bng_cores(self): + if not self._cores_tuple: + self._cores_tuple = self.get_cores_gen_bng_qos() + return self._cores_tuple + + @property + def cpe_cores(self): + return self.bng_cores[0] + + @property + def inet_cores(self): + return self.bng_cores[1] + + @property + def arp_cores(self): + return self.bng_cores[2] + + @property + def arp_task_cores(self): + return self.bng_cores[3] + + @property + def all_rx_cores(self): + return self.latency_cores + + def get_cores_gen_bng_qos(self): + cpe_cores = [] + inet_cores = [] + arp_cores = [] + arp_tasks_core = [0] + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + if not section_name.startswith("core"): continue - else: - return client - msg = "Failed to connect to prox, please check if system {} accepts connections on port {}" - raise Exception(msg.format(self._ip, PROX_PORT)) + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): + continue + + for item_key, item_value in section: + if item_key != 'name': + continue + + if item_value.startswith("cpe"): + core_tuple = CoreSocketTuple(section_name) + cpe_core = core_tuple.core_id + cpe_cores.append(cpe_core) + + elif item_value.startswith("inet"): + core_tuple = CoreSocketTuple(section_name) + inet_core = core_tuple.core_id + inet_cores.append(inet_core) + + elif item_value.startswith("arp"): + core_tuple = CoreSocketTuple(section_name) + arp_core = core_tuple.core_id + arp_cores.append(arp_core) + + # We check the tasks/core separately + if item_value.startswith("arp_task"): + core_tuple = CoreSocketTuple(section_name) + arp_task_core = core_tuple.core_id + arp_tasks_core.append(arp_task_core) + + return cpe_cores, inet_cores, arp_cores, arp_tasks_core + + @contextmanager + def traffic_context(self, pkt_size, value): + # Tester is sending packets at the required speed already after + # setup_test(). Just get the current statistics, sleep the required + # amount of time and calculate packet loss. 
+ inet_pkt_size = pkt_size + cpe_pkt_size = pkt_size - 24 + ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20) + + curr_up_speed = curr_down_speed = 0 + max_up_speed = max_down_speed = value + if ratio < 1: + max_down_speed = value * ratio + else: + max_up_speed = value / ratio + + # Initialize cores + self.sut.stop_all() + time.sleep(0.5) + + # Flush any packets in the NIC RX buffers, otherwise the stats will be + # wrong. + self.sut.start(self.all_rx_cores) + time.sleep(0.5) + self.sut.stop(self.all_rx_cores) + time.sleep(0.5) + self.sut.reset_stats() + + self.sut.set_pkt_size(self.inet_cores, inet_pkt_size) + self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size) + + self.sut.reset_values(self.cpe_cores) + self.sut.reset_values(self.inet_cores) + + # Set correct IP and UDP lengths in packet headers + # CPE + # IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4) + self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2) + # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4) + self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2) + + # INET + # IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4) + self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2) + # IP length (byte 48): 50 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), CRC(4) + self.sut.set_value(self.inet_cores, 48, inet_pkt_size - 50, 2) + # UDP length (byte 70): 70 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), IP(20), CRC(4) + self.sut.set_value(self.inet_cores, 70, inet_pkt_size - 70, 2) + + # Sending ARP to initialize tables - need a few seconds of generation + # to make sure all CPEs are initialized + LOG.info("Initializing SUT: sending ARP packets") + self.sut.set_speed(self.arp_cores, 1, self.arp_task_cores) + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + self.sut.start(self.arp_cores) + time.sleep(4) + + # Ramp up the transmission speed. First go to the common speed, then + # increase steps for the faster one. + self.sut.start(self.cpe_cores + self.inet_cores + self.latency_cores) + + LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed) + + while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed): + # The min(..., ...) takes care of 1) floating point rounding errors + # that could make curr_*_speed to be slightly greater than + # max_*_speed and 2) max_*_speed not being an exact multiple of + # self._step_delta. + if curr_up_speed < max_up_speed: + curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed) + if curr_down_speed < max_down_speed: + curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed) + + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + time.sleep(self.step_time) + + LOG.info("Target speeds reached. Starting real test.") + + yield + + self.sut.stop(self.arp_cores + self.cpe_cores + self.inet_cores) + LOG.info("Test ended. Flushing NIC buffers") + self.sut.start(self.all_rx_cores) + time.sleep(3) + self.sut.stop(self.all_rx_cores) + + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... 
+ data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples + + +class ProxVpeProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "vPE gen" + + def __init__(self, resource_helper): + super(ProxVpeProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + self._ports_tuple = None + + @property + def vpe_cores(self): + if not self._cores_tuple: + self._cores_tuple = self.get_cores_gen_vpe() + return self._cores_tuple + + @property + def cpe_cores(self): + return self.vpe_cores[0] + + @property + def inet_cores(self): + return self.vpe_cores[1] + + @property + def all_rx_cores(self): + return self.latency_cores + + @property + def vpe_ports(self): + if not self._ports_tuple: + self._ports_tuple = self.get_ports_gen_vpe() + return self._ports_tuple + + @property + def cpe_ports(self): + return self.vpe_ports[0] + + @property + def inet_ports(self): + return self.vpe_ports[1] + + def get_cores_gen_vpe(self): + cpe_cores = [] + inet_cores = [] + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + if not section_name.startswith("core"): + continue + + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): + continue + + for item_key, item_value in section: + if item_key != 'name': + continue + + if item_value.startswith("cpe"): + core_tuple = CoreSocketTuple(section_name) + core_tag = core_tuple.core_id + cpe_cores.append(core_tag) + + elif item_value.startswith("inet"): + core_tuple = CoreSocketTuple(section_name) + inet_core = core_tuple.core_id + inet_cores.append(inet_core) + + return cpe_cores, inet_cores + + def get_ports_gen_vpe(self): + cpe_ports = [] + inet_ports = [] + + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + if not section_name.startswith("port"): + continue + tx_port_iter = re.finditer(r'\d+', section_name) + tx_port_no = int(next(tx_port_iter).group(0)) + + for item_key, item_value in section: + if item_key != 'name': + continue + + if item_value.startswith("cpe"): + cpe_ports.append(tx_port_no) + + elif item_value.startswith("inet"): + inet_ports.append(tx_port_no) + + return cpe_ports, inet_ports + + @contextmanager + def traffic_context(self, pkt_size, value): + # Calculate the target upload and download speed. The upload and + # download packets have different packet sizes, so in order to get + # equal bandwidth usage, the ratio of the speeds has to match the ratio + # of the packet sizes. + cpe_pkt_size = pkt_size + inet_pkt_size = pkt_size - 4 + ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20) + + curr_up_speed = curr_down_speed = 0 + max_up_speed = max_down_speed = value + if ratio < 1: + max_down_speed = value * ratio + else: + max_up_speed = value / ratio + + # Adjust speed when multiple cores per port are used to generate traffic + if len(self.cpe_ports) != len(self.cpe_cores): + max_down_speed *= 1.0 * len(self.cpe_ports) / len(self.cpe_cores) + if len(self.inet_ports) != len(self.inet_cores): + max_up_speed *= 1.0 * len(self.inet_ports) / len(self.inet_cores) + + # Initialize cores + self.sut.stop_all() + time.sleep(2) + + # Flush any packets in the NIC RX buffers, otherwise the stats will be + # wrong. 
+ self.sut.start(self.all_rx_cores) + time.sleep(2) + self.sut.stop(self.all_rx_cores) + time.sleep(2) + self.sut.reset_stats() + + self.sut.set_pkt_size(self.inet_cores, inet_pkt_size) + self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size) + + self.sut.reset_values(self.cpe_cores) + self.sut.reset_values(self.inet_cores) + + # Set correct IP and UDP lengths in packet headers + # CPE: IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4) + self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2) + # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4) + self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2) + + # INET: IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4) + self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2) + # UDP length (byte 42): 42 for MAC(12), EthType(2), MPLS(4), IP(20), CRC(4) + self.sut.set_value(self.inet_cores, 42, inet_pkt_size - 42, 2) + + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + + # Ramp up the transmission speed. First go to the common speed, then + # increase steps for the faster one. + self.sut.start(self.cpe_cores + self.inet_cores + self.all_rx_cores) + + LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed) + + while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed): + # The min(..., ...) takes care of 1) floating point rounding errors + # that could make curr_*_speed to be slightly greater than + # max_*_speed and 2) max_*_speed not being an exact multiple of + # self._step_delta. + if curr_up_speed < max_up_speed: + curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed) + if curr_down_speed < max_down_speed: + curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed) + + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + time.sleep(self.step_time) + + LOG.info("Target speeds reached. Starting real test.") + + yield + + self.sut.stop(self.cpe_cores + self.inet_cores) + LOG.info("Test ended. Flushing NIC buffers") + self.sut.start(self.all_rx_cores) + time.sleep(3) + self.sut.stop(self.all_rx_cores) + + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... 
+ data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples + + +class ProxlwAFTRProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "lwAFTR gen" + + def __init__(self, resource_helper): + super(ProxlwAFTRProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + self._ports_tuple = None + self.step_delta = 5 + self.step_time = 0.5 + + @property + def _lwaftr_cores(self): + if not self._cores_tuple: + self._cores_tuple = self._get_cores_gen_lwaftr() + return self._cores_tuple + + @property + def tun_cores(self): + return self._lwaftr_cores[0] + + @property + def inet_cores(self): + return self._lwaftr_cores[1] + + @property + def _lwaftr_ports(self): + if not self._ports_tuple: + self._ports_tuple = self._get_ports_gen_lw_aftr() + return self._ports_tuple + + @property + def tun_ports(self): + return self._lwaftr_ports[0] + + @property + def inet_ports(self): + return self._lwaftr_ports[1] + + @property + def all_rx_cores(self): + return self.latency_cores + + def _get_cores_gen_lwaftr(self): + tun_cores = [] + inet_cores = [] + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + if not section_name.startswith("core"): + continue + + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): + continue + + core_tuple = CoreSocketTuple(section_name) + core_tag = core_tuple.core_id + for item_value in (v for k, v in section if k == 'name'): + if item_value.startswith('tun'): + tun_cores.append(core_tag) + elif item_value.startswith('inet'): + inet_cores.append(core_tag) + + return tun_cores, inet_cores + + def _get_ports_gen_lw_aftr(self): + tun_ports = [] + inet_ports = [] + + re_port = re.compile(r'port (\d+)') + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + match = re_port.search(section_name) + if not match: + continue + + tx_port_no = int(match.group(1)) + for item_value in (v for k, v in section if k == 'name'): + if item_value.startswith('lwB4'): + tun_ports.append(tx_port_no) + elif item_value.startswith('inet'): + inet_ports.append(tx_port_no) + + return tun_ports, inet_ports + + @staticmethod + def _resize(len1, len2): + if len1 == len2: + return 1.0 + return 1.0 * len1 / len2 + + @contextmanager + def traffic_context(self, pkt_size, value): + # Tester is sending packets at the required speed already after + # setup_test(). Just get the current statistics, sleep the required + # amount of time and calculate packet loss. + tun_pkt_size = pkt_size + inet_pkt_size = pkt_size - 40 + ratio = 1.0 * (tun_pkt_size + 20) / (inet_pkt_size + 20) + + curr_up_speed = curr_down_speed = 0 + max_up_speed = max_down_speed = value + + max_up_speed = value / ratio + + # Adjust speed when multiple cores per port are used to generate traffic + if len(self.tun_ports) != len(self.tun_cores): + max_down_speed *= self._resize(len(self.tun_ports), len(self.tun_cores)) + if len(self.inet_ports) != len(self.inet_cores): + max_up_speed *= self._resize(len(self.inet_ports), len(self.inet_cores)) + + # Initialize cores + self.sut.stop_all() + time.sleep(0.5) + + # Flush any packets in the NIC RX buffers, otherwise the stats will be + # wrong. 
+ self.sut.start(self.all_rx_cores) + time.sleep(0.5) + self.sut.stop(self.all_rx_cores) + time.sleep(0.5) + self.sut.reset_stats() + + self.sut.set_pkt_size(self.inet_cores, inet_pkt_size) + self.sut.set_pkt_size(self.tun_cores, tun_pkt_size) + + self.sut.reset_values(self.tun_cores) + self.sut.reset_values(self.inet_cores) + + # Set correct IP and UDP lengths in packet headers + # tun + # IPv6 length (byte 18): 58 for MAC(12), EthType(2), IPv6(40) , CRC(4) + self.sut.set_value(self.tun_cores, 18, tun_pkt_size - 58, 2) + # IP length (byte 56): 58 for MAC(12), EthType(2), CRC(4) + self.sut.set_value(self.tun_cores, 56, tun_pkt_size - 58, 2) + # UDP length (byte 78): 78 for MAC(12), EthType(2), IP(20), UDP(8), CRC(4) + self.sut.set_value(self.tun_cores, 78, tun_pkt_size - 78, 2) + + # INET + # IP length (byte 20): 22 for MAC(12), EthType(2), CRC(4) + self.sut.set_value(self.inet_cores, 16, inet_pkt_size - 18, 2) + # UDP length (byte 42): 42 for MAC(12), EthType(2), IP(20), UPD(8), CRC(4) + self.sut.set_value(self.inet_cores, 38, inet_pkt_size - 38, 2) + + LOG.info("Initializing SUT: sending lwAFTR packets") + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.tun_cores, curr_down_speed) + time.sleep(4) + + # Ramp up the transmission speed. First go to the common speed, then + # increase steps for the faster one. + self.sut.start(self.tun_cores + self.inet_cores + self.latency_cores) + + LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed) + + while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed): + # The min(..., ...) takes care of 1) floating point rounding errors + # that could make curr_*_speed to be slightly greater than + # max_*_speed and 2) max_*_speed not being an exact multiple of + # self._step_delta. + if curr_up_speed < max_up_speed: + curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed) + if curr_down_speed < max_down_speed: + curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed) + + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.tun_cores, curr_down_speed) + time.sleep(self.step_time) + + LOG.info("Target speeds reached. Starting real test.") + + yield + + self.sut.stop(self.tun_cores + self.inet_cores) + LOG.info("Test ended. Flushing NIC buffers") + self.sut.start(self.all_rx_cores) + time.sleep(3) + self.sut.stop(self.all_rx_cores) + + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... + data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples
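
Editor's note with a hedged sketch: the profile-helper classes introduced above are selected from the "[global] name =" value of the PROX configuration. ProxProfileHelper.get_cls() walks the subclasses defined in this module, compares the requested type against each __prox_profile_type__, and falls back to the generic helper when no type (or an unknown one) is given; make_profile_helper() then instantiates the match with the resource helper. The sketch below is illustration only and assumes the yardstick package is importable; StubResourceHelper is a hypothetical stand-in that exposes just the test_type value a real ProxResourceHelper would provide.

# Illustration only: dispatch of profile helpers by the "[global] name" value.
from yardstick.network_services.vnf_generic.vnf import prox_helpers


class StubResourceHelper(object):
    """Hypothetical stand-in exposing only the test_type the dispatch reads."""

    def __init__(self, test_type):
        self.test_type = test_type


for test_type in (None, "MPLS tag/untag", "BNG gen", "vPE gen", "lwAFTR gen"):
    helper = prox_helpers.ProxProfileHelper.make_profile_helper(
        StubResourceHelper(test_type))
    print("{} -> {}".format(test_type, type(helper).__name__))

# Expected mapping, per the __prox_profile_type__ markers above:
#   None           -> ProxProfileHelper        (generic gen/lat profile)
#   MPLS tag/untag -> ProxMplsProfileHelper
#   BNG gen        -> ProxBngProfileHelper
#   vPE gen        -> ProxVpeProfileHelper
#   lwAFTR gen     -> ProxlwAFTRProfileHelper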