1 # Copyright (c) 2017 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import
25 from collections import OrderedDict, namedtuple
26 from contextlib import contextmanager
27 from itertools import repeat, chain
28 from multiprocessing import Queue
31 from six.moves import cStringIO
32 from six.moves import zip, StringIO
34 from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
35 from yardstick.common import utils
36 from yardstick.common.utils import SocketTopology, join_non_strings, try_int
37 from yardstick.network_services.helpers.iniparser import ConfigParser
38 from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
39 from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
# Module-level logger shared by all PROX helper classes below.
LOG = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG on a library module overrides the application's
# logging configuration for this logger — confirm this is intentional.
LOG.setLevel(logging.DEBUG)
# DATS-style option table: maps an internal option name to its (section, key)
# location in the test configuration file plus a default value.
CONFIGURATION_OPTIONS = (
    # dict key           section   key                default value
    ('pktSizes', 'general', 'pkt_sizes', '64,128,256,512,1024,1280,1518'),
    ('testDuration', 'general', 'test_duration', 5.0),
    ('testPrecision', 'general', 'test_precision', 1.0),
    ('tests', 'general', 'tests', None),
    ('toleratedLoss', 'general', 'tolerated_loss', 0.0),

    ('logFile', 'logging', 'file', 'dats.log'),
    ('logDateFormat', 'logging', 'datefmt', None),
    ('logLevel', 'logging', 'level', 'INFO'),
    ('logOverwrite', 'logging', 'overwrite', 1),

    ('testerIp', 'tester', 'ip', None),
    ('testerUser', 'tester', 'user', 'root'),
    ('testerDpdkDir', 'tester', 'rte_sdk', '/root/dpdk'),
    ('testerDpdkTgt', 'tester', 'rte_target', 'x86_64-native-linuxapp-gcc'),
    ('testerProxDir', 'tester', 'prox_dir', '/root/prox'),
    ('testerSocketId', 'tester', 'socket_id', 0),

    ('sutIp', 'sut', 'ip', None),
    ('sutUser', 'sut', 'user', 'root'),
    ('sutDpdkDir', 'sut', 'rte_sdk', '/root/dpdk'),
    ('sutDpdkTgt', 'sut', 'rte_target', 'x86_64-native-linuxapp-gcc'),
    ('sutProxDir', 'sut', 'prox_dir', '/root/prox'),
    ('sutSocketId', 'sut', 'socket_id', 0),
    # NOTE(review): the closing parenthesis of this tuple is not visible in
    # this excerpt.
class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')):
    """Parsed "core N[sM][h]" spec taken from a PROX config section name."""

    # Matches e.g. "core 1", "core 2s0", "core 3s1h" at the end of the name.
    CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$")

    def __new__(cls, *args):
        # NOTE(review): the `try:`/`if matches:` lines are missing from this
        # excerpt; the except clause below pairs with the missing try.
        matches = cls.CORE_RE.search(str(args[0]))
        args = matches.groups()

        # socket id defaults to 0; hyperthread flag is 'h' or ''
        return super(CoreSocketTuple, cls).__new__(cls, int(args[0]), try_int(args[1], 0),
                                                   'h' if args[2] else '')
        except (AttributeError, TypeError, IndexError, ValueError):
            raise ValueError('Invalid core spec {}'.format(args))

    def is_hyperthread(self):
        # the 'h' suffix marks the hyperthread sibling of a physical core
        return self.hyperthread == 'h'

    # NOTE(review): the `def index`/@property lines are missing from this
    # excerpt; the return below is orphaned.
        return int(self.is_hyperthread())

    def find_in_topology(self, cpu_topology):
        """Map this core spec onto a physical core id in cpu_topology."""
        # NOTE(review): a `try:` line is missing from this excerpt.
        socket_core_match = cpu_topology[self.socket_id][self.core_id]
        sorted_match = sorted(socket_core_match.values())
        return sorted_match[self.index][0]
        except (KeyError, IndexError):
            template = "Core {}{} on socket {} does not exist"
            raise ValueError(template.format(self.core_id, self.hyperthread, self.socket_id))
class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')):
    """Totals returned by the PROX "tot stats" command (rx, tx, tsc, hz)."""

    def __new__(cls, *args):
        # Accept either four scalars or one iterable of four values.
        # NOTE(review): the `try:` line and the except-handler body are
        # missing from this excerpt.
        assert args[0] is not str(args[0])
        args = tuple(args[0])
        except (AssertionError, IndexError, TypeError):

        return super(TotStatsTuple, cls).__new__(cls, *args)
class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,'
                                                        'delta_tx,delta_tsc,'
                                                        'latency,rx_total,tx_total,pps')):
    """One traffic-run measurement plus derived loss/throughput helpers.

    NOTE(review): several @property/def/try lines are missing from this
    excerpt; the orphaned statements below belong to them.
    """

    # body of a missing packet-loss property: percent of tx packets dropped
        return 1e2 * self.drop_total / float(self.tx_total)
    except ZeroDivisionError:

        # calculate the effective throughput in Mpps
        return float(self.delta_tx) * self.tsc_hz / self.delta_tsc / 1e6

    def can_be_lost(self):
        # absolute number of packets allowed to drop within tolerance
        return int(self.tx_total * self.tolerated / 1e2)

    def drop_total(self):
        # packets sent but never received back
        return self.tx_total - self.rx_total

    # body of a missing success property: run passes when drops are tolerated
        return self.drop_total <= self.can_be_lost

    def get_samples(self, pkt_size, pkt_loss=None, port_samples=None):
        """Build the KPI samples dict for this run."""
        # NOTE(review): the `if pkt_loss is None:` guard is missing here.
            pkt_loss = self.pkt_loss

        if port_samples is None:
        # NOTE(review): the default assignment, latency_keys and the samples
        # dict opening are missing from this excerpt.
            "Throughput": self.mpps,
            "DropPackets": pkt_loss,
            "CurrentDropPackets": pkt_loss,
            "TxThroughput": self.pps / 1e6,
            "RxThroughput": self.mpps,
        # NOTE(review): the dict closing lines are missing from this excerpt.
        samples.update(port_samples)

        samples.update((key, value) for key, value in zip(latency_keys, self.latency))
        # NOTE(review): `return samples` is missing from this excerpt.

    def log_data(self, logger=None):
        # NOTE(review): the logger-default lines are missing from this excerpt.
        template = "RX: %d; TX: %d; dropped: %d (tolerated: %d)"
        logger.debug(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
        logger.debug("Mpps configured: %f; Mpps effective %f", self.pps / 1e6, self.mpps)
class PacketDump(object):
    """Wrapper around one "pktdump" payload received from PROX."""

    # NOTE(review): a @staticmethod decorator line is missing here.
    def assert_func(func, value1, value2, template=None):
        # raise AssertionError with a formatted message when func() is false
        assert func(value1, value2), template.format(value1, value2)

    def __init__(self, port_id, data_len, payload):
        template = "Packet dump has specified length {}, but payload is {} bytes long"
        self.assert_func(operator.eq, data_len, len(payload), template)
        self._port_id = port_id    # port the packet was captured on
        self._data_len = data_len  # declared payload length in bytes
        self._payload = payload    # byte values of the packet contents

    # NOTE(review): the port_id accessor's def/@property lines are missing.
        """Get the port id of the packet dump"""

    # NOTE(review): the data_len accessor's def/@property lines are missing.
        """Get the length of the data received"""
        return self._data_len

    # NOTE(review): the `def __str__(self):` line is missing here.
        return '<PacketDump port: {} payload: {}>'.format(self._port_id, self._payload)

    def payload(self, start=None, end=None):
        """Get part of the payload as a list of ordinals.

        Returns a list of byte values, matching the contents of the packet dump.
        Optional start and end parameters can be specified to retrieve only a
        part of the packet contents.

        The number of elements in the list is equal to end - start + 1, so end
        is the offset of the last character.

        Args:
            start (pos. int): the starting offset in the payload. If it is not
                specified or None, offset 0 is assumed.
            end (pos. int): the ending offset of the payload. If it is not
                specified or None, the contents until the end of the packet are
                returned.

        Returns:
            [int, int, ...]. Each int represents the ordinal value of a byte in
            the packet payload.
        """
        # NOTE(review): the start/end default-handling lines are missing here.
            end = self.data_len - 1

        # Bounds checking on offsets
        template = "Start offset must be non-negative"
        self.assert_func(operator.ge, start, 0, template)

        template = "End offset must be less than {1}"
        self.assert_func(operator.lt, end, self.data_len, template)

        # Adjust for splice operation: end offset must be 1 more than the offset
        # of the last desired character.
        # NOTE(review): the adjustment line appears to be missing from this
        # excerpt.

        return self._payload[start:end]
class ProxSocketHelper(object):
    """Text-protocol client for the PROX control socket."""

    def __init__(self, sock=None):
        """ creates new prox instance """
        super(ProxSocketHelper, self).__init__()
        # NOTE(review): the `if sock is None:` guard and the _sock/_pkt_dumps
        # attribute assignments are missing from this excerpt.
        sock = socket.socket()
        self.master_stats = None

    def connect(self, ip, port):
        """Connect to the prox instance on the remote system"""
        self._sock.connect((ip, port))

    def get_socket(self):
        """ get the socket connected to the remote instance """
        # NOTE(review): `return self._sock` is missing from this excerpt.

    def _parse_socket_data(self, decoded_data, pkt_dump_only):
        """Split received text into 1-line responses and packet dumps."""
        def get_newline_index():
            # scan from the current parse position
            return decoded_data.find('\n', index)

        # NOTE(review): the parse-position initialisation is missing here.
        for newline_index in iter(get_newline_index, -1):
            ret_str = decoded_data[index:newline_index]

            # a dump header looks like "pktdump,<port_id>,<data_len>"
            # NOTE(review): the try/except lines around the split are missing.
            mode, port_id, data_len = ret_str.split(',', 2)
            mode, port_id, data_len = None, None, None

            if mode != 'pktdump':
                # Regular 1-line message. Stop reading from the socket.
                LOG.debug("Regular response read")
                # NOTE(review): the return lines are missing here.

            LOG.debug("Packet dump header read: [%s]", ret_str)

            # The line is a packet dump header. Parse it, read the
            # packet payload, store the dump for later retrieval.
            # Skip over the packet dump and continue processing: a
            # 1-line response may follow the packet dump.

            data_len = int(data_len)
            data_start = newline_index + 1  # + 1 to skip over \n
            data_end = data_start + data_len
            sub_data = decoded_data[data_start:data_end]
            pkt_payload = array.array('B', (ord(v) for v in sub_data))
            pkt_dump = PacketDump(int(port_id), data_len, pkt_payload)
            self._pkt_dumps.append(pkt_dump)

            # NOTE(review): the pkt_dump_only early-return lines are missing.
            # Return boolean instead of string to signal
            # successful reception of the packet dump.
            LOG.debug("Packet dump stored, returning")

    def get_data(self, pkt_dump_only=False, timeout=1):
        """ read data from the socket """

        # This method behaves slightly differently depending on whether it is
        # called to read the response to a command (pkt_dump_only = 0) or if
        # it is called specifically to read a packet dump (pkt_dump_only = 1).
        #
        # Packet dumps look like:
        #   pktdump,<port_id>,<data_len>\n
        #   <packet contents as byte array>\n
        # This means the total packet dump message consists of 2 lines instead
        # of 1 line.
        #
        # - Response for a command (pkt_dump_only = 0):
        #   1) Read response from the socket until \n (end of message)
        #   2a) If the response is a packet dump header (starts with "pktdump,"):
        #       - Read the packet payload and store the packet dump for later
        #         retrieval.
        #       - Reset the state and restart from 1). Eventually state 2b) will
        #         be reached and the function will return.
        #   2b) If the response is not a packet dump:
        #       - Return the received message as a string
        #
        # - Explicit request to read a packet dump (pkt_dump_only = 1):
        #   - Read the dump header and payload
        #   - Store the packet dump for later retrieval
        #   - Return True to signify a packet dump was successfully read

        # NOTE(review): the inner `def is_ready():` line is missing from this
        # excerpt; the two statements below are its body.
            # recv() is blocking, so avoid calling it when no data is waiting.
            ready = select.select([self._sock], [], [], timeout)
            return bool(ready[0])

        # NOTE(review): status/ret_str initialisation lines are missing here.
        for status in iter(is_ready, False):
            decoded_data = self._sock.recv(256).decode('utf-8')
            ret_str = self._parse_socket_data(decoded_data, pkt_dump_only)

        LOG.debug("Received data from socket: [%s]", ret_str)
        return ret_str if status else ''

    def put_command(self, to_send):
        """ send data to the remote instance """
        LOG.debug("Sending data to socket: [%s]", to_send.rstrip('\n'))
        # NOTE(review): a `try:` line appears to be missing from this excerpt.
        # TODO: sendall will block, we need a timeout
        self._sock.sendall(to_send.encode('utf-8'))

    def get_packet_dump(self):
        """ get the next packet dump """
        # NOTE(review): the emptiness guard line is missing from this excerpt.
        return self._pkt_dumps.pop(0)

    def stop_all_reset(self):
        """ stop the remote instance and reset stats """
        LOG.debug("Stop all and reset stats")
        # NOTE(review): the method body and the following `def stop_all(self):`
        # line are missing from this excerpt; the statements below belong to
        # stop_all.

        """ stop all cores on the remote instance """
        LOG.debug("Stop all")
        self.put_command("stop all\n")
389 def stop(self, cores, task=''):
390 """ stop specific cores on the remote instance """
391 LOG.debug("Stopping cores %s", cores)
392 self.put_command("stop {} {}\n".format(join_non_strings(',', cores), task))
    # NOTE(review): the `def start_all(self):` line is missing from this
    # excerpt; the statements below are its body.
        """ start all cores on the remote instance """
        LOG.debug("Start all")
        self.put_command("start all\n")
400 def start(self, cores):
401 """ start specific cores on the remote instance """
402 LOG.debug("Starting cores %s", cores)
403 self.put_command("start {}\n".format(join_non_strings(',', cores)))
    def reset_stats(self):
        """ reset the statistics on the remote instance """
        LOG.debug("Reset stats")
        self.put_command("reset stats\n")

    def _run_template_over_cores(self, template, cores, *args):
        # Send the formatted command once per core.
        # NOTE(review): the `for core in cores:` line is missing from this
        # excerpt.
        self.put_command(template.format(core, *args))

    def set_pkt_size(self, cores, pkt_size):
        """ set the packet size to generate on the remote instance """
        LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size)
        # NOTE(review): one line is missing from this excerpt here.
        self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size)
423 def set_value(self, cores, offset, value, length):
424 """ set value on the remote instance """
425 msg = "Set value for core(s) %s to '%s' (length %d), offset %d"
426 LOG.debug(msg, cores, value, length, offset)
427 template = "set value {} 0 {} {} {}\n"
428 self._run_template_over_cores(template, cores, offset, value, length)
430 def reset_values(self, cores):
431 """ reset values on the remote instance """
432 LOG.debug("Set value for core(s) %s", cores)
433 self._run_template_over_cores("reset values {} 0\n", cores)
    def set_speed(self, cores, speed, tasks=None):
        """ set speed on the remote instance """
        # NOTE(review): the `if tasks is None:` guard line is missing here.
        tasks = [0] * len(cores)
        elif len(tasks) != len(cores):
            LOG.error("set_speed: cores and tasks must have the same len")
        LOG.debug("Set speed for core(s)/tasks(s) %s to %g", list(zip(cores, tasks)), speed)
        for (core, task) in list(zip(cores, tasks)):
            self.put_command("speed {} {} {}\n".format(core, task, speed))

    def slope_speed(self, cores_speed, duration, n_steps=0):
        """will start to increase speed from 0 to N where N is taken from
        a['speed'] for each a in cores_speed"""
        # by default, each step will take 0.5 sec
        # NOTE(review): the guard line before this default is missing here.
        n_steps = duration * 2

        private_core_data = []
        step_duration = float(duration) / n_steps
        for core_data in cores_speed:
            target = float(core_data['speed'])
            private_core_data.append({
                'cores': core_data['cores'],
                'delta': target / n_steps,
                # NOTE(review): the remaining dict entries and closing lines
                # are missing from this excerpt.

        deltas_keys_iter = repeat(('current', 'delta'), n_steps - 1)
        for key1, key2 in chain(deltas_keys_iter, [('zero', 'speed')]):
            time.sleep(step_duration)
            for core_data in private_core_data:
                core_data['current'] = core_data[key1] + core_data[key2]
                self.set_speed(core_data['cores'], core_data['current'])

    def set_pps(self, cores, pps, pkt_size):
        """ set packets per second for specific cores on the remote instance """
        msg = "Set packets per sec for core(s) %s to %g%% of line rate (packet size: %d)"
        LOG.debug(msg, cores, pps, pkt_size)

        # speed in percent of line-rate
        speed = float(pps) * (pkt_size + 20) / TEN_GIGABIT / BITS_PER_BYTE
        self._run_template_over_cores("speed {} 0 {}\n", cores, speed)

    def lat_stats(self, cores, task=0):
        """Get the latency statistics from the remote system"""
        # 1-based index, if max core is 4, then 0, 1, 2, 3, 4 len = 5
        # NOTE(review): the result-dict initialisations, the `for core in
        # cores:` loop header and the `try:` line are missing from this
        # excerpt.
        self.put_command("lat stats {} {} \n".format(core, task))
        ret = self.get_data()

            lat_min[core], lat_max[core], lat_avg[core] = \
                tuple(int(n) for n in ret.split(",")[:3])

        except (AttributeError, ValueError, TypeError):
        return lat_min, lat_max, lat_avg

    def get_all_tot_stats(self):
        """Fetch the (rx, tx, tsc, hz) totals as a TotStatsTuple."""
        self.put_command("tot stats\n")
        all_stats_str = self.get_data().split(",")
        if len(all_stats_str) != 4:
            # NOTE(review): the short-response handling lines are missing.
        all_stats = TotStatsTuple(int(v) for v in all_stats_str)
        self.master_stats = all_stats
        # NOTE(review): `return all_stats` is missing from this excerpt.

    # NOTE(review): the `def hz(self):` line is missing from this excerpt;
    # the return below is orphaned.
        return self.get_all_tot_stats()[3]

    # Thin alias kept for callers that use the older rx_stats name.
    def rx_stats(self, cores, task=0):
        return self.core_stats(cores, task)

    def core_stats(self, cores, task=0):
        """Get the receive statistics from the remote system"""
        rx = tx = drop = tsc = 0
        # NOTE(review): the `for core in cores:` line is missing here.
        self.put_command("core stats {} {}\n".format(core, task))
        ret = self.get_data().split(",")
        # NOTE(review): the per-core accumulation lines are missing here.
        return rx, tx, drop, tsc

    def port_stats(self, ports):
        """get counter values from a specific port"""
        tot_result = [0] * 12
        # NOTE(review): the `for port in ports:` line is missing here.
        self.put_command("port_stats {}\n".format(port))
        ret = [try_int(s, 0) for s in self.get_data().split(",")]
        tot_result = [sum(x) for x in zip(tot_result, ret)]
        # NOTE(review): `return tot_result` is missing from this excerpt.

    def measure_tot_stats(self):
        """Measure total stats around a block; fills start/end/delta."""
        start = self.get_all_tot_stats()
        container = {'start_tot': start}
        # NOTE(review): the @contextmanager decorator and the
        # try/yield/finally lines are missing from this excerpt.
        container['end_tot'] = end = self.get_all_tot_stats()

        container['delta'] = TotStatsTuple(end - start for start, end in zip(start, end))

    # NOTE(review): the `def tot_stats(self):` line is missing here.
        """Get the total statistics from the remote system"""
        stats = self.get_all_tot_stats()
        # NOTE(review): the return line is missing from this excerpt.

    def tot_ierrors(self):
        """Get the total ierrors from the remote system"""
        self.put_command("tot ierrors tot\n")
        recv = self.get_data().split(',')
        tot_ierrors = int(recv[0])
        # NOTE(review): the tsc assignment line is missing from this excerpt.
        return tot_ierrors, tsc

    def set_count(self, count, cores):
        """Set the number of packets to send on the specified core"""
        self._run_template_over_cores("count {} 0 {}\n", cores, count)

    def dump_rx(self, core_id, task_id=0, count=1):
        """Activate dump on rx on the specified core"""
        LOG.debug("Activating dump on RX for core %d, task %d, count %d", core_id, task_id, count)
        self.put_command("dump_rx {} {} {}\n".format(core_id, task_id, count))
        time.sleep(1.5)  # Give PROX time to set up packet dumping

    # NOTE(review): the `def quit(self):` line is missing from this excerpt;
    # the statements below are its body.
        """ stop all cores on the remote instance """
        LOG.debug("Quit prox")
        self.put_command("quit\n")
583 def force_quit(self):
584 """ stop all cores on the remote instance """
585 LOG.debug("Force Quit prox")
586 self.put_command("quit_force\n")
# Sentinel meaning "no default supplied" in find_section/find_in_section.
_LOCAL_OBJECT = object()


class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
    """Setup helper that generates/uploads the PROX configuration."""

    # the actual app is lowercase
    # NOTE(review): the APP_NAME/APP_WORD assignment lines are missing from
    # this excerpt.

    # not used for Prox but added for consistency
    LUA_PARAMETER_NAME = ""
    LUA_PARAMETER_PEER = {
    # NOTE(review): the dict body and closing brace are missing from this
    # excerpt.

    # Seconds to wait for the generated config to appear on config_queue.
    CONFIG_QUEUE_TIMEOUT = 120

    def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
        # NOTE(review): remote_path is set before the base __init__ runs —
        # presumably the base class reads it; confirm before reordering.
        self.remote_path = None
        super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
        self.remote_prox_file_name = None
        self._prox_config_data = None
        self.additional_files = {}
        self.config_queue = Queue()
        # allow_exit_without_flush
        self.config_queue.cancel_join_thread()
        self._global_section = None
619 def prox_config_data(self):
620 if self._prox_config_data is None:
621 # this will block, but it needs too
622 self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT)
623 return self._prox_config_data
    # NOTE(review): a @property decorator line is missing here.
    def global_section(self):
        # resolved lazily from the parsed config
        if self._global_section is None and self.prox_config_data:
            self._global_section = self.find_section("global")
        return self._global_section

    def find_section(self, name, default=_LOCAL_OBJECT):
        """Return the section named ``name``; KeyError if absent and no default."""
        result = next((value for key, value in self.prox_config_data if key == name), default)
        if result is _LOCAL_OBJECT:
            raise KeyError('{} not found in Prox config'.format(name))
        # NOTE(review): `return result` is missing from this excerpt.

    def find_in_section(self, section_name, section_key, default=_LOCAL_OBJECT):
        """Return the value for ``section_key`` inside ``section_name``."""
        section = self.find_section(section_name, [])
        result = next((value for key, value in section if key == section_key), default)
        if result is _LOCAL_OBJECT:
            template = '{} not found in {} section of Prox config'
            raise KeyError(template.format(section_key, section_name))
        # NOTE(review): `return result` is missing from this excerpt.

    def _build_pipeline_kwargs(self):
        # kwargs later substituted into the PROX command-line template
        tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
        self.pipeline_kwargs = {
            'tool_path': tool_path,
            'tool_dir': os.path.dirname(tool_path),
        # NOTE(review): the closing brace line is missing from this excerpt.

    def copy_to_target(self, config_file_path, prox_file):
        """Upload a local file to /tmp/<prox_file> on the target."""
        remote_path = os.path.join("/tmp", prox_file)
        self.ssh_helper.put(config_file_path, remote_path)
        # NOTE(review): `return remote_path` is missing from this excerpt.

    # NOTE(review): a @staticmethod decorator line is missing here.
    def _get_tx_port(section, sections):
        """Extract the numeric "tx port" value from a config section."""
        # NOTE(review): an initialisation line is missing from this excerpt.
        for item in sections[section]:
            if item[0] == "tx port":
                iface_port = re.findall(r'\d+', item[1])
                # do we want the last one?
                #   if yes, then can we reverse?
        return int(iface_port[0])

    # NOTE(review): a @staticmethod decorator line is missing here.
    def _replace_quoted_with_value(quoted, value, count=1):
        """Replace the first quoted substring in ``quoted`` with ``value``."""
        new_string = re.sub('"[^"]*"', '"{}"'.format(value), quoted, count)
        # NOTE(review): `return new_string` is missing from this excerpt.
672 def _insert_additional_file(self, value):
673 file_str = value.split('"')
674 base_name = os.path.basename(file_str[1])
675 file_str[1] = self.additional_files[base_name]
676 return '"'.join(file_str)
    def generate_prox_config_file(self, config_path):
        """Parse the PROX config and rewrite MAC/dofile entries for this topology."""
        # NOTE(review): the sections initialisation and the parser invocation
        # lines are missing from this excerpt.
        prox_config = ConfigParser(config_path, sections)

        # Ensure MAC is set "hardware"
        all_ports = self.vnfd_helper.port_pairs.all_ports
        # use dpdk port number
        for port_name in all_ports:
            port_num = self.vnfd_helper.port_num(port_name)
            port_section_name = "port {}".format(port_num)
            for section_name, section in sections:
                if port_section_name != section_name:
                    # NOTE(review): a `continue` line is missing here.

                for index, section_data in enumerate(section):
                    if section_data[0] == "mac":
                        section_data[1] = "hardware"

        for _, section in sections:
            # for index, (item_key, item_val) in enumerate(section):
            for index, section_data in enumerate(section):
                item_key, item_val = section_data
                if item_val.startswith("@@dst_mac"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["dst_mac"]
                    # space-separated MAC bytes for this placeholder form
                    section_data[1] = mac.replace(":", " ", 6)

                if item_key == "dst mac" and item_val.startswith("@@"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["dst_mac"]
                    section_data[1] = mac

        # if addition file specified in prox config
        if not self.additional_files:
            # NOTE(review): an early return line is missing here.

        for section_name, section in sections:
            for index, section_data in enumerate(section):
                # NOTE(review): one or more lines are missing here.
                if section_data[0].startswith("dofile"):
                    section_data[0] = self._insert_additional_file(section_data[0])

                if section_data[1].startswith("dofile"):
                    section_data[1] = self._insert_additional_file(section_data[1])
        # NOTE(review): the final return line is missing from this excerpt.

    # NOTE(review): a @staticmethod decorator line is missing here.
    def write_prox_lua(lua_config):
        """
        Write an .ini-format config file for PROX (parameters.lua)
        PROX does not allow a space before/after the =, so we need
        a custom method
        """
        # NOTE(review): the output-list initialisation is missing here.
        for key in lua_config:
            value = '"' + lua_config[key] + '"'
            if key == "__name__":
                # NOTE(review): a `continue` line is missing here.
            if value is not None and value != '@':
                key = "=".join((key, str(value).replace('\n', '\n\t')))
                # NOTE(review): else/append lines are missing here.
                key = str(key).replace('\n', '\n\t')
        return os.linesep.join(out)

    # NOTE(review): a @staticmethod decorator line is missing here.
    def write_prox_config(prox_config):
        """
        Write an .ini-format config file for PROX
        PROX does not allow a space before/after the =, so we need
        a custom method
        """
        # NOTE(review): the output-list initialisation is missing here.
        for i, (section_name, section) in enumerate(prox_config):
            out.append("[{}]".format(section_name))
            for index, item in enumerate(section):
                # NOTE(review): the key/value unpacking and several
                # branch/append lines are missing from this excerpt.
                if key == "__name__":
                if value is not None and value != '@':
                    key = "=".join((key, str(value).replace('\n', '\n\t')))
                    key = str(key).replace('\n', '\n\t')
        return os.linesep.join(out)

    def put_string_to_file(self, s, remote_path):
        """Upload the string ``s`` to ``remote_path`` on the target."""
        file_obj = cStringIO(s)
        self.ssh_helper.put_file_obj(file_obj, remote_path)
        # NOTE(review): `return remote_path` is missing from this excerpt.

    def generate_prox_lua_file(self):
        """Build the parameters.lua key/value mapping for this topology."""
        # NOTE(review): the mapping initialisation (``p``) is missing here.
        all_ports = self.vnfd_helper.port_pairs.all_ports
        for port_name in all_ports:
            port_num = self.vnfd_helper.port_num(port_name)
            intf = self.vnfd_helper.find_interface(name=port_name)
            vintf = intf['virtual-interface']
            p["tester_mac{0}".format(port_num)] = vintf["dst_mac"]
            p["src_mac{0}".format(port_num)] = vintf["local_mac"]
        # NOTE(review): the return line is missing from this excerpt.

    def upload_prox_lua(self, config_file, lua_data):
        # prox can't handle spaces around ' = ' so use custom method
        out = StringIO(self.write_prox_lua(lua_data))

        remote_path = os.path.join("/tmp", config_file)
        self.ssh_helper.put_file_obj(out, remote_path)
        # NOTE(review): `return remote_path` is missing from this excerpt.

    def upload_prox_config(self, config_file, prox_config_data):
        # prox can't handle spaces around ' = ' so use custom method
        out = StringIO(self.write_prox_config(prox_config_data))

        remote_path = os.path.join("/tmp", config_file)
        self.ssh_helper.put_file_obj(out, remote_path)
        # NOTE(review): `return remote_path` is missing from this excerpt.

    def build_config_file(self):
        """Generate, stage and upload the PROX config and auxiliary files."""
        task_path = self.scenario_helper.task_path
        options = self.scenario_helper.options
        config_path = options['prox_config']
        config_file = os.path.basename(config_path)
        config_path = find_relative_file(config_path, task_path)
        self.additional_files = {}

        # NOTE(review): one or more lines are missing in this area.
        if options['prox_generate_parameter']:
            self.lua = self.generate_prox_lua_file()
            if len(self.lua) > 0:
                self.upload_prox_lua("parameters.lua", self.lua)

        prox_files = options.get('prox_files', [])
        if isinstance(prox_files, six.string_types):
            prox_files = [prox_files]
        for key_prox_file in prox_files:
            base_prox_file = os.path.basename(key_prox_file)
            key_prox_path = find_relative_file(key_prox_file, task_path)
            remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file)
            self.additional_files[base_prox_file] = remote_prox_file

        self._prox_config_data = self.generate_prox_config_file(config_path)
        # copy config to queue so we can read it from traffic_runner process
        self.config_queue.put(self._prox_config_data)
        self.remote_path = self.upload_prox_config(config_file, self._prox_config_data)

    def build_config(self):
        """Build the config file and assemble the PROX launch command line."""
        self.build_config_file()

        options = self.scenario_helper.options
        prox_args = options['prox_args']
        LOG.info("Provision and start the %s", self.APP_NAME)
        self._build_pipeline_kwargs()
        self.pipeline_kwargs["args"] = " ".join(
            " ".join([k, v if v else ""]) for k, v in prox_args.items())
        self.pipeline_kwargs["cfg_file"] = self.remote_path

        cmd_template = "sudo bash -c 'cd {tool_dir}; {tool_path} -o cli {args} -f {cfg_file} '"
        prox_cmd = cmd_template.format(**self.pipeline_kwargs)
        # NOTE(review): `return prox_cmd` is missing from this excerpt.
# this might be bad, sometimes we want regular ResourceHelper methods, like collect_kpi
class ProxResourceHelper(ClientResourceHelper):
    """Client resource helper driving a remote PROX traffic generator."""

    RESOURCE_WORD = 'prox'
868 def find_pci(pci, bound_pci):
869 # we have to substring match PCI bus address from the end
870 return any(b.endswith(pci) for b in bound_pci)
    def __init__(self, setup_helper):
        super(ProxResourceHelper, self).__init__(setup_helper)
        self.mgmt_interface = self.vnfd_helper.mgmt_interface
        self._user = self.mgmt_interface["user"]
        self._ip = self.mgmt_interface["ip"]

        # NOTE(review): additional attribute initialisations are missing here.
        self._vpci_to_if_name_map = None
        self.additional_file = {}
        self.remote_prox_file_name = None
        # NOTE(review): lines are missing between these initialisations.
        self._test_type = None

    # NOTE(review): the property def lines are missing here; the statement
    # below is orphaned in this excerpt.
        self.client = self._connect()

    # NOTE(review): the lazy `test_type` property's def line is missing here.
        if self._test_type is None:
            self._test_type = self.setup_helper.find_in_section('global', 'name', None)
        return self._test_type

    def run_traffic(self, traffic_profile):
        """Drive the traffic profile until terminated."""
        self._queue.cancel_join_thread()
        # NOTE(review): lines are missing here in this excerpt.
        traffic_profile.init(self._queue)
        # this frees up the run_traffic loop
        self.client_started.value = 1

        while not self._terminated.value:
            # move it all to traffic_profile
            self._run_traffic_once(traffic_profile)
913 def _run_traffic_once(self, traffic_profile):
914 traffic_profile.execute_traffic(self)
915 if traffic_profile.done:
916 self._queue.put({'done': True})
917 LOG.debug("tg_prox done")
918 self._terminated.value = 1
    # For VNF use ResourceHelper method to collect KPIs directly.
    # for TG leave the superclass ClientResourceHelper collect_kpi_method intact
    def collect_collectd_kpi(self):
        """Collect collectd KPIs via the ResourceHelper path."""
        return self._collect_resource_kpi()

    def collect_kpi(self):
        """Collect the usual client KPIs, adding collectd stats manually."""
        result = super(ProxResourceHelper, self).collect_kpi()
        # add in collectd kpis manually
        # NOTE(review): a guard line is missing here.
        result['collect_stats'] = self._collect_resource_kpi()
        # NOTE(review): `return result` is missing from this excerpt.

    # NOTE(review): the `def terminate(self):` line is missing here; the
    # statements below are its body.
        # should not be called, use VNF terminate
        raise NotImplementedError()

    # NOTE(review): the def line of this method is missing here; the return
    # below is orphaned in this excerpt.
        return self.sut  # force connection

    def execute(self, cmd, *args, **kwargs):
        """Invoke ``cmd`` on the connected PROX socket helper if present."""
        func = getattr(self.sut, cmd, None)
        # NOTE(review): the `if func:` guard line is missing here.
        return func(*args, **kwargs)

    def _connect(self, client=None):
        """Run and connect to prox on the remote system """
        # De-allocating a large amount of hugepages takes some time. If a new
        # PROX instance is started immediately after killing the previous one,
        # it might not be able to allocate hugepages, because they are still
        # being freed. Hence the -w switch.
        # self.connection.execute("sudo killall -w Prox 2>/dev/null")
        # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t
        # -f ./handle_none-4.cfg"
        # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir +
        # + "export RTE_TARGET=" + self._dpdk_target + ";" \
        # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50;
        # + "./build/Prox " + prox_args
        # log.debug("Starting PROX with command [%s]", prox_cmd)
        # thread.start_new_thread(self.ssh_check_quit, (self, self._user,
        # self._ip, prox_cmd))
        # NOTE(review): a guard line is missing here.
        client = ProxSocketHelper()

        # try connecting to Prox for 60s
        for _ in range(RETRY_SECONDS):
            time.sleep(RETRY_INTERVAL)
            # NOTE(review): the try/return lines around the connect call are
            # missing from this excerpt.
            client.connect(self._ip, PROX_PORT)
            except (socket.gaierror, socket.error):

        msg = "Failed to connect to prox, please check if system {} accepts connections on port {}"
        raise Exception(msg.format(self._ip, PROX_PORT))
class ProxDataHelper(object):
    """Aggregates per-run statistics gathered from the PROX SUT."""

    def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss):
        super(ProxDataHelper, self).__init__()
        self.vnfd_helper = vnfd_helper
        # NOTE(review): the sut/value attribute assignments are missing from
        # this excerpt (both are read by methods of this class).
        self.pkt_size = pkt_size
        self.tolerated_loss = tolerated_loss
        self.port_count = len(self.vnfd_helper.port_pairs.all_ports)
        # NOTE(review): a line (presumably a tsc_hz default) is missing here.
        self.measured_stats = None
        # NOTE(review): a line is missing here.
        self._totals_and_pps = None
        self.result_tuple = None
996 def totals_and_pps(self):
997 if self._totals_and_pps is None:
998 rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8]
999 pps = self.value / 100.0 * self.line_rate_to_pps()
1000 self._totals_and_pps = rx_total, tx_total, pps
1001 return self._totals_and_pps
    # NOTE(review): the @property/def lines for the rx_total, tx_total and
    # pps accessors are missing from this excerpt; these returns are orphaned.
        return self.totals_and_pps[0]

        return self.totals_and_pps[1]

        return self.totals_and_pps[2]

    # NOTE(review): the `def samples(self):` line and the samples dict
    # initialisation are missing from this excerpt.
        for port_name, port_num in self.vnfd_helper.ports_iter():
            port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
            samples[port_name] = {
                "in_packets": port_rx_total,
                "out_packets": port_tx_total,
        # NOTE(review): the dict closing and return lines are missing here.

    def __enter__(self):
        self.check_interface_count()
        # NOTE(review): `return self` is missing from this excerpt.

    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): the method body is missing from this excerpt.

    def make_tuple(self):
        """Assemble the ProxTestDataTuple from the measured statistics."""
        if self.result_tuple:
            # NOTE(review): an early-return line is missing here.

        self.result_tuple = ProxTestDataTuple(
            self.tolerated_loss,
            # NOTE(review): the tsc_hz argument line is missing here.
            self.measured_stats['delta'].rx,
            self.measured_stats['delta'].tx,
            self.measured_stats['delta'].tsc,
            # NOTE(review): the remaining argument lines are missing here.
        self.result_tuple.log_data()

    # NOTE(review): a @contextmanager decorator line appears to be missing.
    def measure_tot_stats(self):
        with self.sut.measure_tot_stats() as self.measured_stats:
            # NOTE(review): the yield and follow-up lines are missing here.

    def check_interface_count(self):
        # do this assert in init? unless we expect interface count to
        # change from one run to another run...
        assert self.port_count in {1, 2, 4}, \
            "Invalid number of ports: 1, 2 or 4 ports only supported at this time"
1061 def capture_tsc_hz(self):
1062 self.tsc_hz = float(self.sut.hz())
1064 def line_rate_to_pps(self):
1065 # FIXME Don't hardcode 10Gb/s
1066 return self.port_count * TEN_GIGABIT / BITS_PER_BYTE / (self.pkt_size + 20)
class ProxProfileHelper(object):
    """Base helper that maps PROX config sections to cores and runs tests."""

    __prox_profile_type__ = "Generic"

    # section "mode" values used to classify cores
    PROX_CORE_GEN_MODE = "gen"
    PROX_CORE_LAT_MODE = "lat"

    # NOTE(review): a @classmethod decorator line is missing here.
    def get_cls(cls, helper_type):
        """Return class of specified type."""
        # NOTE(review): the guard line before this early return is missing.
            return ProxProfileHelper

        for profile_helper_class in utils.itersubclasses(cls):
            if helper_type == profile_helper_class.__prox_profile_type__:
                return profile_helper_class
        # fall back to the generic helper when no subclass matches
        return ProxProfileHelper

    # NOTE(review): a @classmethod decorator line is missing here.
    def make_profile_helper(cls, resource_helper):
        """Build the profile helper matching resource_helper.test_type."""
        return cls.get_cls(resource_helper.test_type)(resource_helper)

    def __init__(self, resource_helper):
        super(ProxProfileHelper, self).__init__()
        self.resource_helper = resource_helper
        # caches resolved lazily by the properties below
        self._cpu_topology = None
        self._test_cores = None
        self._latency_cores = None
1100 def cpu_topology(self):
1101 if not self._cpu_topology:
1102 stdout = io.BytesIO()
1103 self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout)
1104 self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8'))
1105 return self._cpu_topology
1108 def test_cores(self):
1109 if not self._test_cores:
1110 self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE)
1111 return self._test_cores
1114 def latency_cores(self):
1115 if not self._latency_cores:
1116 self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
1117 return self._latency_cores
1120 def traffic_context(self, pkt_size, value):
1122 self.sut.reset_stats()
1124 self.sut.set_pkt_size(self.test_cores, pkt_size)
1125 self.sut.set_speed(self.test_cores, value)
1126 self.sut.start_all()
1131 def get_cores(self, mode):
1134 for section_name, section in self.setup_helper.prox_config_data:
1135 if not section_name.startswith("core"):
1138 for key, value in section:
1139 if key == "mode" and value == mode:
1140 core_tuple = CoreSocketTuple(section_name)
1141 core = core_tuple.find_in_topology(self.cpu_topology)
def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
    """Run one measurement: generate *pkt_size*-byte traffic at *value*
    speed for *duration* seconds and collect statistics.

    :param pkt_size: packet size in bytes
    :param duration: measurement time in seconds
    :param value: generator speed passed to the traffic context
    :param tolerated_loss: loss threshold forwarded to the data helper
    :return: tuple of (data_helper.result_tuple, data_helper.samples)
    """
    data_helper = ProxDataHelper(
        self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)

    with data_helper:
        with self.traffic_context(pkt_size, value):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

    return data_helper.result_tuple, data_helper.samples
def get_latency(self):
    """Fetch latency statistics from the configured latency cores.

    :return: result of ``sut.lat_stats`` (lat_min, lat_max, lat_avg),
        or an empty list when no latency cores are configured
    :rtype: list
    """
    if self._latency_cores:
        return self.sut.lat_stats(self._latency_cores)
    # Explicit empty result instead of an implicit None, matching the
    # documented list return type.
    return []
def terminate(self):
    # NOTE(review): the method body was lost in source mangling; restored
    # as a no-op hook for subclasses — confirm against the original file.
    pass
1170 def __getattr__(self, item):
1171 return getattr(self.resource_helper, item)
class ProxMplsProfileHelper(ProxProfileHelper):
    """Profile helper for the "MPLS tag/untag" PROX test.

    Generator cores are split into "tagged" (MPLS-tagged stream) and
    "plain" (untagged UDP stream) sets based on the core names in the
    PROX configuration.
    """

    __prox_profile_type__ = "MPLS tag/untag"

    def __init__(self, resource_helper):
        super(ProxMplsProfileHelper, self).__init__(resource_helper)
        # Lazily-computed (tagged_cores, plain_cores) pair.
        self._cores_tuple = None

    @property
    def mpls_cores(self):
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_mpls()
        return self._cores_tuple

    @property
    def tagged_cores(self):
        return self.mpls_cores[0]

    @property
    def plain_cores(self):
        return self.mpls_cores[1]

    def get_cores_mpls(self):
        """Scan the PROX config for gen-mode cores named "tag*"/"udp*".

        :return: tuple (cores_tagged, cores_plain) of topology core IDs
        """
        cores_tagged = []
        cores_plain = []
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # Only traffic-generator ("gen" mode) cores are of interest.
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("tag"):
                    core_tuple = CoreSocketTuple(section_name)
                    core_tag = core_tuple.find_in_topology(self.cpu_topology)
                    cores_tagged.append(core_tag)

                elif item_value.startswith("udp"):
                    core_tuple = CoreSocketTuple(section_name)
                    core_udp = core_tuple.find_in_topology(self.cpu_topology)
                    cores_plain.append(core_udp)

        return cores_tagged, cores_plain

    @contextmanager
    def traffic_context(self, pkt_size, value):
        """Start tagged+plain traffic for the with-block; stop on exit.

        The plain stream sends packets 4 bytes smaller (no MPLS label),
        at a speed scaled so both streams consume the same line bandwidth
        (the +20 accounts for per-packet L1 framing overhead).
        """
        self.sut.stop_all()
        self.sut.reset_stats()
        try:
            self.sut.set_pkt_size(self.tagged_cores, pkt_size)
            self.sut.set_pkt_size(self.plain_cores, pkt_size - 4)
            self.sut.set_speed(self.tagged_cores, value)
            ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
            self.sut.set_speed(self.plain_cores, value * ratio)
            self.sut.start_all()
            yield
        finally:
            # Guarantee traffic is stopped even if the test body raises.
            self.sut.stop_all()
class ProxBngProfileHelper(ProxProfileHelper):
    """Profile helper for the "BNG gen" PROX test.

    Generator cores are partitioned by name into cpe (download), inet
    (upload) and arp sets; traffic is ramped up stepwise to the target
    speed before the measurement window starts.
    """

    __prox_profile_type__ = "BNG gen"

    def __init__(self, resource_helper):
        super(ProxBngProfileHelper, self).__init__(resource_helper)
        # Lazily-computed (cpe, inet, arp, arp_task) core sets.
        self._cores_tuple = None

    @property
    def bng_cores(self):
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_gen_bng_qos()
        return self._cores_tuple

    @property
    def cpe_cores(self):
        return self.bng_cores[0]

    @property
    def inet_cores(self):
        return self.bng_cores[1]

    @property
    def arp_cores(self):
        return self.bng_cores[2]

    @property
    def arp_task_cores(self):
        return self.bng_cores[3]

    @property
    def all_rx_cores(self):
        # The latency cores are the receivers; they are the ones started
        # and stopped when flushing NIC buffers.
        return self.latency_cores

    def get_cores_gen_bng_qos(self):
        """Scan the PROX config for gen-mode cores named cpe*/inet*/arp*.

        :return: tuple (cpe_cores, inet_cores, arp_cores, arp_tasks_core)
        """
        cpe_cores = []
        inet_cores = []
        arp_cores = []
        # NOTE(review): seeded with task 0 in the original — presumably the
        # default ARP task index; confirm against PROX semantics.
        arp_tasks_core = [0]
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # Only traffic-generator ("gen" mode) cores are of interest.
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    core_tuple = CoreSocketTuple(section_name)
                    cpe_core = core_tuple.find_in_topology(self.cpu_topology)
                    cpe_cores.append(cpe_core)

                elif item_value.startswith("inet"):
                    core_tuple = CoreSocketTuple(section_name)
                    inet_core = core_tuple.find_in_topology(self.cpu_topology)
                    inet_cores.append(inet_core)

                elif item_value.startswith("arp"):
                    core_tuple = CoreSocketTuple(section_name)
                    arp_core = core_tuple.find_in_topology(self.cpu_topology)
                    arp_cores.append(arp_core)

                # We check the tasks/core separately. Note that "arp_task*"
                # names also match the "arp" branch above.
                if item_value.startswith("arp_task"):
                    core_tuple = CoreSocketTuple(section_name)
                    arp_task_core = core_tuple.find_in_topology(self.cpu_topology)
                    arp_tasks_core.append(arp_task_core)

        return cpe_cores, inet_cores, arp_cores, arp_tasks_core

    @contextmanager
    def traffic_context(self, pkt_size, value):
        """Ramp BNG up/down traffic to the target speed, yield for the
        measurement, then stop traffic and flush the NIC RX buffers.
        """
        # Tester is sending packets at the required speed already after
        # setup_test(). Just get the current statistics, sleep the required
        # amount of time and calculate packet loss.
        inet_pkt_size = pkt_size
        cpe_pkt_size = pkt_size - 24
        ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)

        curr_up_speed = curr_down_speed = 0
        max_up_speed = max_down_speed = value
        # Scale the smaller-packet direction so both directions consume
        # the same line bandwidth.
        if ratio < 1:
            max_down_speed = value * ratio
        else:
            max_up_speed = value / ratio

        # Initialize cores.
        # NOTE(review): the sleep durations below were lost in source
        # mangling and have been restored — confirm against the original.
        self.sut.stop_all()
        time.sleep(0.5)

        # Flush any packets in the NIC RX buffers, otherwise the stats will be
        # wrong.
        self.sut.start(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.stop(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.reset_stats()

        self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
        self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)

        self.sut.reset_values(self.cpe_cores)
        self.sut.reset_values(self.inet_cores)

        # Set correct IP and UDP lengths in packet headers
        # IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
        self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
        # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
        self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)

        # IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
        self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
        # IP length (byte 48): 50 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), CRC(4)
        self.sut.set_value(self.inet_cores, 48, inet_pkt_size - 50, 2)
        # UDP length (byte 70): 70 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), IP(20), CRC(4)
        self.sut.set_value(self.inet_cores, 70, inet_pkt_size - 70, 2)

        # Sending ARP to initialize tables - need a few seconds of generation
        # to make sure all CPEs are initialized
        LOG.info("Initializing SUT: sending ARP packets")
        self.sut.set_speed(self.arp_cores, 1, self.arp_task_cores)
        self.sut.set_speed(self.inet_cores, curr_up_speed)
        self.sut.set_speed(self.cpe_cores, curr_down_speed)
        self.sut.start(self.arp_cores)
        time.sleep(4)

        # Ramp up the transmission speed. First go to the common speed, then
        # increase steps for the faster one.
        self.sut.start(self.cpe_cores + self.inet_cores + self.latency_cores)

        LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)

        while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
            # The min(..., ...) takes care of 1) floating point rounding errors
            # that could make curr_*_speed to be slightly greater than
            # max_*_speed and 2) max_*_speed not being an exact multiple of
            # self.step_delta.
            if curr_up_speed < max_up_speed:
                curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
            if curr_down_speed < max_down_speed:
                curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)

            self.sut.set_speed(self.inet_cores, curr_up_speed)
            self.sut.set_speed(self.cpe_cores, curr_down_speed)
            time.sleep(self.step_time)

        LOG.info("Target speeds reached. Starting real test.")

        yield

        self.sut.stop(self.arp_cores + self.cpe_cores + self.inet_cores)
        LOG.info("Test ended. Flushing NIC buffers")
        self.sut.start(self.all_rx_cores)
        time.sleep(3)
        self.sut.stop(self.all_rx_cores)

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
        """Run one measurement; same flow as ProxProfileHelper.run_test,
        kept here to preserve this class's existing interface.
        """
        data_helper = ProxDataHelper(
            self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)

        with data_helper, self.traffic_context(pkt_size, value):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples
class ProxVpeProfileHelper(ProxProfileHelper):
    """Profile helper for the "vPE gen" PROX test.

    Generator cores and TX ports are partitioned by name into cpe
    (download) and inet (upload) sets; traffic is ramped up stepwise to
    the target speed before the measurement window starts.
    """

    __prox_profile_type__ = "vPE gen"

    def __init__(self, resource_helper):
        super(ProxVpeProfileHelper, self).__init__(resource_helper)
        # Lazily-computed (cpe, inet) core and port sets.
        self._cores_tuple = None
        self._ports_tuple = None

    @property
    def vpe_cores(self):
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_gen_vpe()
        return self._cores_tuple

    @property
    def cpe_cores(self):
        return self.vpe_cores[0]

    @property
    def inet_cores(self):
        return self.vpe_cores[1]

    @property
    def all_rx_cores(self):
        # The latency cores are the receivers; they are the ones started
        # and stopped when flushing NIC buffers.
        return self.latency_cores

    @property
    def vpe_ports(self):
        if not self._ports_tuple:
            self._ports_tuple = self.get_ports_gen_vpe()
        return self._ports_tuple

    @property
    def cpe_ports(self):
        return self.vpe_ports[0]

    @property
    def inet_ports(self):
        return self.vpe_ports[1]

    def get_cores_gen_vpe(self):
        """Scan the PROX config for gen-mode cores named "cpe*"/"inet*".

        :return: tuple (cpe_cores, inet_cores) of topology core IDs
        """
        cpe_cores = []
        inet_cores = []
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # Only traffic-generator ("gen" mode) cores are of interest.
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    core_tuple = CoreSocketTuple(section_name)
                    core_tag = core_tuple.find_in_topology(self.cpu_topology)
                    cpe_cores.append(core_tag)

                elif item_value.startswith("inet"):
                    core_tuple = CoreSocketTuple(section_name)
                    inet_core = core_tuple.find_in_topology(self.cpu_topology)
                    inet_cores.append(inet_core)

        return cpe_cores, inet_cores

    def get_ports_gen_vpe(self):
        """Map [port N] sections named "cpe*"/"inet*" to port numbers.

        :return: tuple (cpe_ports, inet_ports) of TX port numbers
        """
        cpe_ports = []
        inet_ports = []

        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("port"):
                continue
            tx_port_iter = re.finditer(r'\d+', section_name)
            tx_port_no = int(next(tx_port_iter).group(0))

            # The original re-scanned the section in a nested loop with
            # shadowed loop variables; a single pass over the 'name'
            # entries is equivalent for well-formed configs and cannot
            # double-count a port.
            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    cpe_ports.append(tx_port_no)

                elif item_value.startswith("inet"):
                    inet_ports.append(tx_port_no)

        return cpe_ports, inet_ports

    @contextmanager
    def traffic_context(self, pkt_size, value):
        """Ramp vPE up/down traffic to the target speed, yield for the
        measurement, then stop traffic and flush the NIC RX buffers.
        """
        # Calculate the target upload and download speed. The upload and
        # download packets have different packet sizes, so in order to get
        # equal bandwidth usage, the ratio of the speeds has to match the ratio
        # of the packet sizes.
        cpe_pkt_size = pkt_size
        inet_pkt_size = pkt_size - 4
        ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)

        curr_up_speed = curr_down_speed = 0
        max_up_speed = max_down_speed = value
        if ratio < 1:
            max_down_speed = value * ratio
        else:
            max_up_speed = value / ratio

        # Adjust speed when multiple cores per port are used to generate traffic
        if len(self.cpe_ports) != len(self.cpe_cores):
            max_down_speed *= 1.0 * len(self.cpe_ports) / len(self.cpe_cores)
        if len(self.inet_ports) != len(self.inet_cores):
            max_up_speed *= 1.0 * len(self.inet_ports) / len(self.inet_cores)

        # Initialize cores.
        # NOTE(review): the sleep durations below were lost in source
        # mangling and have been restored — confirm against the original.
        self.sut.stop_all()
        time.sleep(0.5)

        # Flush any packets in the NIC RX buffers, otherwise the stats will be
        # wrong.
        self.sut.start(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.stop(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.reset_stats()

        self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
        self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)

        self.sut.reset_values(self.cpe_cores)
        self.sut.reset_values(self.inet_cores)

        # Set correct IP and UDP lengths in packet headers
        # CPE: IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
        self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
        # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
        self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)

        # INET: IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
        self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
        # UDP length (byte 42): 42 for MAC(12), EthType(2), MPLS(4), IP(20), CRC(4)
        self.sut.set_value(self.inet_cores, 42, inet_pkt_size - 42, 2)

        self.sut.set_speed(self.inet_cores, curr_up_speed)
        self.sut.set_speed(self.cpe_cores, curr_down_speed)

        # Ramp up the transmission speed. First go to the common speed, then
        # increase steps for the faster one.
        self.sut.start(self.cpe_cores + self.inet_cores + self.all_rx_cores)

        LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)

        while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
            # The min(..., ...) takes care of 1) floating point rounding errors
            # that could make curr_*_speed to be slightly greater than
            # max_*_speed and 2) max_*_speed not being an exact multiple of
            # self.step_delta.
            if curr_up_speed < max_up_speed:
                curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
            if curr_down_speed < max_down_speed:
                curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)

            self.sut.set_speed(self.inet_cores, curr_up_speed)
            self.sut.set_speed(self.cpe_cores, curr_down_speed)
            time.sleep(self.step_time)

        LOG.info("Target speeds reached. Starting real test.")

        yield

        self.sut.stop(self.cpe_cores + self.inet_cores)
        LOG.info("Test ended. Flushing NIC buffers")
        self.sut.start(self.all_rx_cores)
        time.sleep(3)
        self.sut.stop(self.all_rx_cores)

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
        """Run one measurement; same flow as ProxProfileHelper.run_test,
        kept here to preserve this class's existing interface.
        """
        data_helper = ProxDataHelper(
            self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)

        with data_helper, self.traffic_context(pkt_size, value):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples