1 # Copyright (c) 2018 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
25 from collections import OrderedDict, namedtuple
26 from contextlib import contextmanager
27 from itertools import repeat, chain
28 from multiprocessing import Queue
31 from six.moves import cStringIO
32 from six.moves import zip, StringIO
34 from yardstick.common import utils
35 from yardstick.common.utils import SocketTopology, join_non_strings, try_int
36 from yardstick.network_services.helpers.iniparser import ConfigParser
37 from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
38 from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
39 from yardstick.network_services import constants
# Module-level loggers: LOG carries general debug tracing for this module,
# LOG_RESULT routes result output through the top-level 'yardstick' logger.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
LOG_RESULT = logging.getLogger('yardstick')
LOG_RESULT.setLevel(logging.DEBUG)
# Known configuration options: each entry maps an internal dict key to the
# (section, key) it lives under in the .ini file, plus the default used when
# the key is absent.  NOTE(review): the closing paren of this tuple is not
# visible in this view of the file.
CONFIGURATION_OPTIONS = (
    # dict key           section   key              default value
    ('pktSizes', 'general', 'pkt_sizes', '64,128,256,512,1024,1280,1518'),
    ('testDuration', 'general', 'test_duration', 5.0),
    ('testPrecision', 'general', 'test_precision', 1.0),
    ('tests', 'general', 'tests', None),
    ('toleratedLoss', 'general', 'tolerated_loss', 0.0),

    # logging destination and format
    ('logFile', 'logging', 'file', 'dats.log'),
    ('logDateFormat', 'logging', 'datefmt', None),
    ('logLevel', 'logging', 'level', 'INFO'),
    ('logOverwrite', 'logging', 'overwrite', 1),

    # traffic-generator ("tester") host settings
    ('testerIp', 'tester', 'ip', None),
    ('testerUser', 'tester', 'user', 'root'),
    ('testerDpdkDir', 'tester', 'rte_sdk', '/root/dpdk'),
    ('testerDpdkTgt', 'tester', 'rte_target', 'x86_64-native-linuxapp-gcc'),
    ('testerProxDir', 'tester', 'prox_dir', '/root/prox'),
    ('testerSocketId', 'tester', 'socket_id', 0),

    # system-under-test host settings
    ('sutIp', 'sut', 'ip', None),
    ('sutUser', 'sut', 'user', 'root'),
    ('sutDpdkDir', 'sut', 'rte_sdk', '/root/dpdk'),
    ('sutDpdkTgt', 'sut', 'rte_target', 'x86_64-native-linuxapp-gcc'),
    ('sutProxDir', 'sut', 'prox_dir', '/root/prox'),
    ('sutSocketId', 'sut', 'socket_id', 0),
class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')):
    # Parsed PROX core spec, e.g. "core 1s0h" -> core 1, socket 0, hyperthread.
    # Pattern: "core <id>[s<socket>][h]" - socket and 'h' suffix are optional.
    CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$")

    def __new__(cls, *args):
        # NOTE(review): the 'try:' matching the 'except' below is not
        # visible in this elided view of the file.
        matches = cls.CORE_RE.search(str(args[0]))
        args = matches.groups()
        # socket defaults to 0 when the "s<n>" part is absent.
        return super(CoreSocketTuple, cls).__new__(cls, int(args[0]), try_int(args[1], 0),
                                                   'h' if args[2] else '')
        except (AttributeError, TypeError, IndexError, ValueError):
            # Any parse failure is normalized into a single ValueError.
            raise ValueError('Invalid core spec {}'.format(args))

    def is_hyperthread(self):
        # True when the spec carried the trailing 'h' marker.
        return self.hyperthread == 'h'

        # NOTE(review): body of an elided definition (presumably an 'index'
        # property): 0 for the physical sibling, 1 for the hyperthread one.
        return int(self.is_hyperthread())

    def find_in_topology(self, cpu_topology):
        # Map this (socket, core, hyperthread) spec to a CPU number in the
        # supplied topology mapping; missing entries raise ValueError.
        socket_core_match = cpu_topology[self.socket_id][self.core_id]
        sorted_match = sorted(socket_core_match.values())
        return sorted_match[self.index][0]
        except (KeyError, IndexError):
            template = "Core {}{} on socket {} does not exist"
            raise ValueError(template.format(self.core_id, self.hyperthread, self.socket_id))
class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')):
    # Total rx/tx/tsc/hz statistics; the constructor accepts either the four
    # scalars or a single (non-string) iterable containing them.
    def __new__(cls, *args):
        # NOTE(review): the 'try:' matching the 'except' below is elided.
        # Reject a bare string, then unpack a single iterable argument.
        assert args[0] is not str(args[0])
        args = tuple(args[0])
        except (AssertionError, IndexError, TypeError):
        return super(TotStatsTuple, cls).__new__(cls, *args)
class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,'
                                                        'delta_tx,delta_tsc,'
                                                        'latency,rx_total,tx_total,'
        # NOTE(review): the remainder of the field list and several property
        # 'def' lines of this class are elided in this view.

        # pkt-loss percentage: dropped / transmitted * 100.
        return 1e2 * self.drop_total / float(self.tx_total)
        except ZeroDivisionError:

        # calculate the effective TX throughput in Mpps
        return float(self.delta_tx) * self.tsc_hz / self.delta_tsc / 1e6

        # calculate the effective RX throughput in Mpps
        return float(self.delta_rx) * self.tsc_hz / self.delta_tsc / 1e6

    def can_be_lost(self):
        # Absolute number of packets allowed to be lost under the configured
        # tolerance (a percentage of tx_total).
        return int(self.tx_total * self.tolerated / 1e2)

    def drop_total(self):
        # Packets transmitted but never received back.
        return self.tx_total - self.rx_total

        # Test passes when losses stay within the tolerated budget.
        return self.drop_total <= self.can_be_lost

    def get_samples(self, pkt_size, pkt_loss=None, port_samples=None):
        # Build the KPI sample dict; pkt_loss defaults to the computed value.
            pkt_loss = self.pkt_loss

        if port_samples is None:
            "Throughput": self.rx_mpps,
            "RxThroughput": self.rx_mpps,
            "DropPackets": pkt_loss,
            "CurrentDropPackets": pkt_loss,
            # requested_pps is in pps; KPIs are reported in Mpps.
            "RequestedTxThroughput": self.requested_pps / 1e6,
            "TxThroughput": self.tx_mpps,
        samples.update(port_samples)

        samples.update((key, value) for key, value in zip(latency_keys, self.latency))

    def log_data(self, logger=None):
        # Emit a human-readable summary of this iteration's counters.
        template = "RX: %d; TX: %d; dropped: %d (tolerated: %d)"
        logger.info(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
        logger.info("Mpps configured: %f; Mpps generated %f; Mpps received %f",
                    self.requested_pps / 1e6, self.tx_mpps, self.rx_mpps)
class PacketDump(object):
    # One captured packet: originating port, declared length, raw payload.

    def assert_func(func, value1, value2, template=None):
        # Raise AssertionError with a formatted message when func(v1, v2)
        # is false; used for payload and offset validation below.
        assert func(value1, value2), template.format(value1, value2)

    def __init__(self, port_id, data_len, payload):
        """Store one packet dump, validating the declared length."""
        template = "Packet dump has specified length {}, but payload is {} bytes long"
        self.assert_func(operator.eq, data_len, len(payload), template)
        self._port_id = port_id
        self._data_len = data_len
        self._payload = payload

        """Get the port id of the packet dump"""

        """Get the length of the data received"""
        return self._data_len

        return '<PacketDump port: {} payload: {}>'.format(self._port_id, self._payload)

    def payload(self, start=None, end=None):
        """Get part of the payload as a list of ordinals.

        Returns a list of byte values, matching the contents of the packet dump.
        Optional start and end parameters can be specified to retrieve only a
        part of the packet contents.

        The number of elements in the list is equal to end - start + 1, so end
        is the offset of the last character.

        Args:
            start (pos. int): the starting offset in the payload. If it is not
                specified or None, offset 0 is assumed.
            end (pos. int): the ending offset of the payload. If it is not
                specified or None, the contents until the end of the packet are
                returned.

        Returns:
            [int, int, ...]. Each int represents the ordinal value of a byte in
            the packet payload.
        """
            # Default end: last byte of the payload.
            end = self.data_len - 1

        # Bounds checking on offsets
        template = "Start offset must be non-negative"
        self.assert_func(operator.ge, start, 0, template)

        template = "End offset must be less than {1}"
        self.assert_func(operator.lt, end, self.data_len, template)

        # Adjust for splice operation: end offset must be 1 more than the offset
        # of the last desired character.

        return self._payload[start:end]
class ProxSocketHelper(object):
    # Thin wrapper around PROX's line-oriented TCP control socket.

    def __init__(self, sock=None):
        """ creates new prox instance """
        super(ProxSocketHelper, self).__init__()
            # No socket supplied by the caller: create a fresh TCP socket.
            sock = socket.socket()

        self.master_stats = None

    def connect(self, ip, port):
        """Connect to the prox instance on the remote system"""
        self._sock.connect((ip, port))

    def get_socket(self):
        """ get the socket connected to the remote instance """

    def _parse_socket_data(self, decoded_data, pkt_dump_only):
        # Scan decoded_data line by line; packet-dump messages span two
        # "lines" (header + binary payload) and are stored in _pkt_dumps.
        def get_newline_index():
            return decoded_data.find('\n', index)

        for newline_index in iter(get_newline_index, -1):
            ret_str = decoded_data[index:newline_index]

            # A dump header has the shape "pktdump,<port_id>,<data_len>".
                mode, port_id, data_len = ret_str.split(',', 2)
                mode, port_id, data_len = None, None, None

            if mode != 'pktdump':
                # Regular 1-line message. Stop reading from the socket.
                LOG.debug("Regular response read")

            LOG.debug("Packet dump header read: [%s]", ret_str)

            # The line is a packet dump header. Parse it, read the
            # packet payload, store the dump for later retrieval.
            # Skip over the packet dump and continue processing: a
            # 1-line response may follow the packet dump.

            data_len = int(data_len)
            data_start = newline_index + 1  # + 1 to skip over \n
            data_end = data_start + data_len
            sub_data = decoded_data[data_start:data_end]
            pkt_payload = array.array('B', (ord(v) for v in sub_data))
            pkt_dump = PacketDump(int(port_id), data_len, pkt_payload)
            self._pkt_dumps.append(pkt_dump)

                # Return boolean instead of string to signal
                # successful reception of the packet dump.
                LOG.debug("Packet dump stored, returning")

        return ret_str, False
    def get_string(self, pkt_dump_only=False, timeout=0.01):
        # Read one complete response string from the socket, polling with
        # select() so recv() never blocks indefinitely.
        def is_ready_string():
            # recv() is blocking, so avoid calling it when no data is waiting.
            ready = select.select([self._sock], [], [], timeout)
            return bool(ready[0])

        while status is False:
            for status in iter(is_ready_string, False):
                decoded_data = self._sock.recv(256).decode('utf-8')
                ret_str, done = self._parse_socket_data(decoded_data,

        LOG.debug("Received data from socket: [%s]", ret_str)
        return status, ret_str

    def get_data(self, pkt_dump_only=False, timeout=0.01):
        """ read data from the socket """

        # This method behaves slightly differently depending on whether it is
        # called to read the response to a command (pkt_dump_only = 0) or if
        # it is called specifically to read a packet dump (pkt_dump_only = 1).
        #
        # Packet dumps look like:
        #   pktdump,<port_id>,<data_len>\n
        #   <packet contents as byte array>\n
        # This means the total packet dump message consists of 2 lines instead
        #
        # - Response for a command (pkt_dump_only = 0):
        #   1) Read response from the socket until \n (end of message)
        #   2a) If the response is a packet dump header (starts with "pktdump,"):
        #     - Read the packet payload and store the packet dump for later
        #     - Reset the state and restart from 1). Eventually state 2b) will
        #       be reached and the function will return.
        #   2b) If the response is not a packet dump:
        #     - Return the received message as a string
        #
        # - Explicit request to read a packet dump (pkt_dump_only = 1):
        #   - Read the dump header and payload
        #   - Store the packet dump for later retrieval
        #   - Return True to signify a packet dump was successfully read

            # recv() is blocking, so avoid calling it when no data is waiting.
            ready = select.select([self._sock], [], [], timeout)
            return bool(ready[0])

        for status in iter(is_ready, False):
            decoded_data = self._sock.recv(256).decode('utf-8')
            ret_str, done = self._parse_socket_data(decoded_data, pkt_dump_only)

        LOG.debug("Received data from socket: [%s]", ret_str)
        return ret_str if status else ''

    def put_command(self, to_send):
        """ send data to the remote instance """
        LOG.debug("Sending data to socket: [%s]", to_send.rstrip('\n'))
        # NOTE: sendall will block, we need a timeout
        self._sock.sendall(to_send.encode('utf-8'))
        except:  # pylint: disable=bare-except

    def get_packet_dump(self):
        """ get the next packet dump """
        # FIFO order: oldest stored dump first.
            return self._pkt_dumps.pop(0)
    def stop_all_reset(self):
        """ stop the remote instance and reset stats """
        LOG.debug("Stop all and reset stats")

        """ stop all cores on the remote instance """
        LOG.debug("Stop all")
        self.put_command("stop all\n")

    def stop(self, cores, task=''):
        """ stop specific cores on the remote instance """
        # De-duplicate the core list while preserving its order.
            if core not in tmpcores:
                tmpcores.append(core)

        LOG.debug("Stopping cores %s", tmpcores)
        self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task))

        """ start all cores on the remote instance """
        LOG.debug("Start all")
        self.put_command("start all\n")

    def start(self, cores):
        """ start specific cores on the remote instance """
        # De-duplicate the core list while preserving its order.
            if core not in tmpcores:
                tmpcores.append(core)

        LOG.debug("Starting cores %s", tmpcores)
        self.put_command("start {}\n".format(join_non_strings(',', tmpcores)))

    def reset_stats(self):
        """ reset the statistics on the remote instance """
        LOG.debug("Reset stats")
        self.put_command("reset stats\n")

    def _run_template_over_cores(self, template, cores, *args):
        # Send one formatted command per core; '{}' slot 0 is the core id.
            self.put_command(template.format(core, *args))

    def set_pkt_size(self, cores, pkt_size):
        """ set the packet size to generate on the remote instance """
        LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size)

        self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size)
462 def set_value(self, cores, offset, value, length):
463 """ set value on the remote instance """
464 msg = "Set value for core(s) %s to '%s' (length %d), offset %d"
465 LOG.debug(msg, cores, value, length, offset)
466 template = "set value {} 0 {} {} {}\n"
467 self._run_template_over_cores(template, cores, offset, value, length)
469 def reset_values(self, cores):
470 """ reset values on the remote instance """
471 LOG.debug("Set value for core(s) %s", cores)
472 self._run_template_over_cores("reset values {} 0\n", cores)
    def set_speed(self, cores, speed, tasks=None):
        """ set speed on the remote instance """
            # Default: task 0 on every core.
            tasks = [0] * len(cores)
        elif len(tasks) != len(cores):
            # Mismatch is logged but not fatal; zip() below truncates.
            LOG.error("set_speed: cores and tasks must have the same len")
        LOG.debug("Set speed for core(s)/tasks(s) %s to %g", list(zip(cores, tasks)), speed)
        for (core, task) in list(zip(cores, tasks)):
            self.put_command("speed {} {} {}\n".format(core, task, speed))

    def slope_speed(self, cores_speed, duration, n_steps=0):
        """will start to increase speed from 0 to N where N is taken from
        a['speed'] for each a in cores_speed"""
        # by default, each step will take 0.5 sec
            n_steps = duration * 2

        private_core_data = []
        step_duration = float(duration) / n_steps
        for core_data in cores_speed:
            target = float(core_data['speed'])
            private_core_data.append({
                'cores': core_data['cores'],
                # per-step increment so the ramp reaches 'target' in n_steps
                'delta': target / n_steps,

        # n_steps-1 incremental steps, then a final jump to the exact target.
        deltas_keys_iter = repeat(('current', 'delta'), n_steps - 1)
        for key1, key2 in chain(deltas_keys_iter, [('zero', 'speed')]):
            time.sleep(step_duration)
            for core_data in private_core_data:
                core_data['current'] = core_data[key1] + core_data[key2]
                self.set_speed(core_data['cores'], core_data['current'])

    def set_pps(self, cores, pps, pkt_size,
                line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
        """ set packets per second for specific cores on the remote instance """
        msg = "Set packets per sec for core(s) %s to %g%% of line rate (packet size: %d)"
        LOG.debug(msg, cores, pps, pkt_size)

        # speed in percent of line-rate (+20 bytes per packet for framing
        # overhead: preamble, SFD, IFG, CRC)
        speed = float(pps) * (pkt_size + 20) / line_speed / BITS_PER_BYTE
        self._run_template_over_cores("speed {} 0 {}\n", cores, speed)

    def lat_stats(self, cores, task=0):
        """Get the latency statistics from the remote system"""
        # 1-based index, if max core is 4, then 0, 1, 2, 3, 4 len = 5
            self.put_command("lat stats {} {} \n".format(core, task))
            ret = self.get_data()
                # First three comma-separated fields are min/max/avg latency.
                lat_min[core], lat_max[core], lat_avg[core] = \
                    tuple(int(n) for n in ret.split(",")[:3])
            except (AttributeError, ValueError, TypeError):

        return lat_min, lat_max, lat_avg

    def get_all_tot_stats(self):
        # Query total rx/tx/tsc/hz counters and cache them in master_stats.
        self.put_command("tot stats\n")
        all_stats_str = self.get_data().split(",")
        if len(all_stats_str) != 4:

        all_stats = TotStatsTuple(int(v) for v in all_stats_str)
        self.master_stats = all_stats

        # NOTE(review): body of an elided definition (presumably hz):
        # field 3 of the tot-stats tuple is the TSC frequency.
        return self.get_all_tot_stats()[3]

    def core_stats(self, cores, task=0):
        """Get the receive statistics from the remote system"""
        rx = tx = drop = tsc = 0
            self.put_command("core stats {} {}\n".format(core, task))
            ret = self.get_data().split(",")
        return rx, tx, drop, tsc
564 def multi_port_stats(self, ports):
565 """get counter values from all ports at once"""
567 ports_str = ",".join(map(str, ports))
569 tot_result = [0] * len(ports)
572 while (len(ports) is not len(ports_all_data)):
573 self.put_command("multi port stats {}\n".format(ports_str))
574 status, ports_all_data_str = self.get_string()
579 ports_all_data = ports_all_data_str.split(";")
581 if len(ports) is len(ports_all_data):
582 for port_data_str in ports_all_data:
586 tmpdata = [try_int(s, 0) for s in port_data_str.split(",")]
587 except (IndexError, TypeError):
588 LOG.error("Unpacking data error %s", port_data_str)
591 if (len(tmpdata) < 6) or tmpdata[0] not in ports:
592 LOG.error("Corrupted PACKET %s - retrying", port_data_str)
595 tot_result[port_index] = tmpdata
596 port_index = port_index + 1
598 LOG.error("Empty / too much data - retry -%s-", ports_all_data)
601 LOG.debug("Multi port packet ..OK.. %s", tot_result)
602 return True, tot_result
    def multi_port_stats_tuple(stats, ports):
        """
        Create a statistics tuple from port stats.

        Returns a dict which contains the port stats indexed by port name.

        :param stats: (List) - List of List of port stats in pps
        :param ports (Iterator) - to List of Ports

        :return: (Dict) of port stats indexed by port_name
        """
            # Invert the (name, number) pairs so records can be labelled.
            port_names = {port_num: port_name for port_name, port_num in ports}
        except (TypeError, IndexError, KeyError):
            LOG.critical("Ports are not initialized or number of port is ZERO ... CRITICAL ERROR")

            # stat layout: [port_num, rx_pps, tx_pps, ...]
            samples[port_names[port_num]] = {
                "in_packets": stat[1],
                "out_packets": stat[2]}
        except (TypeError, IndexError, KeyError):
            LOG.error("Ports data and samples data is incompatable ....")
638 def multi_port_stats_diff(prev_stats, new_stats, hz):
640 Create a statistics tuple from difference between prev port stats
641 and current port stats. And store results in pps.
643 :param prev_stats: (List) - Previous List of port statistics
644 :param new_stats: (List) - Current List of port statistics
645 :param hz (float) - speed of system in Hz
647 :return: sample (List) - Difference of prev_port_stats and
648 new_port_stats in pps
657 if len(prev_stats) is not len(new_stats):
658 for port_index, stat in enumerate(new_stats):
659 stats.append([port_index, float(0), float(0), 0, 0, 0])
663 for port_index, stat in enumerate(new_stats):
664 if stat[RX_TOTAL_INDEX] > prev_stats[port_index][RX_TOTAL_INDEX]:
665 rx_total = stat[RX_TOTAL_INDEX] - \
666 prev_stats[port_index][RX_TOTAL_INDEX]
668 rx_total = stat[RX_TOTAL_INDEX]
670 if stat[TX_TOTAL_INDEX] > prev_stats[port_index][TX_TOTAL_INDEX]:
671 tx_total = stat[TX_TOTAL_INDEX] - prev_stats[port_index][TX_TOTAL_INDEX]
673 tx_total = stat[TX_TOTAL_INDEX]
675 if stat[TSC_INDEX] > prev_stats[port_index][TSC_INDEX]:
676 tsc = stat[TSC_INDEX] - prev_stats[port_index][TSC_INDEX]
678 tsc = stat[TSC_INDEX]
681 rx_total = tx_total = float(0)
684 LOG.error("HZ is ZERO ..")
685 rx_total = tx_total = float(0)
687 rx_total = float(rx_total * hz / tsc)
688 tx_total = float(tx_total * hz / tsc)
690 stats.append([port_index, rx_total, tx_total, 0, 0, tsc])
691 except (TypeError, IndexError, KeyError):
693 LOG.info("Current Port Stats incompatable to previous Port stats .. Discarded")
    def port_stats(self, ports):
        """get counter values from a specific port"""
        # Accumulate the 12 counter fields across all requested ports.
        tot_result = [0] * 12
            self.put_command("port_stats {}\n".format(port))
            ret = [try_int(s, 0) for s in self.get_data().split(",")]
            tot_result = [sum(x) for x in zip(tot_result, ret)]

    def measure_tot_stats(self):
        # Context-manager style measurement: snapshot the totals before and
        # after the caller's 'with' body and yield the delta.
        start = self.get_all_tot_stats()
        container = {'start_tot': start}
            container['end_tot'] = end = self.get_all_tot_stats()

        container['delta'] = TotStatsTuple(e - s for s, e in zip(start, end))

        """Get the total statistics from the remote system"""
        stats = self.get_all_tot_stats()

    def tot_ierrors(self):
        """Get the total ierrors from the remote system"""
        self.put_command("tot ierrors tot\n")
        recv = self.get_data().split(',')
        tot_ierrors = int(recv[0])
        return tot_ierrors, tsc
730 def set_count(self, count, cores):
731 """Set the number of packets to send on the specified core"""
732 self._run_template_over_cores("count {} 0 {}\n", cores, count)
734 def dump_rx(self, core_id, task_id=0, count=1):
735 """Activate dump on rx on the specified core"""
736 LOG.debug("Activating dump on RX for core %d, task %d, count %d", core_id, task_id, count)
737 self.put_command("dump_rx {} {} {}\n".format(core_id, task_id, count))
738 time.sleep(1.5) # Give PROX time to set up packet dumping
        # NOTE(review): the enclosing 'def quit' line is elided in this view.
        """ stop all cores on the remote instance """
        LOG.debug("Quit prox")
        self.put_command("quit\n")
751 def force_quit(self):
752 """ stop all cores on the remote instance """
753 LOG.debug("Force Quit prox")
754 self.put_command("quit_force\n")
# Unique sentinel distinguishing "no default supplied" from an explicit
# None in the config-lookup helpers below.
_LOCAL_OBJECT = object()
class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
    # the actual app is lowercase

    # not used for Prox but added for consistency

    LUA_PARAMETER_NAME = ""
    LUA_PARAMETER_PEER = {

    # Seconds to wait for the parsed config to appear on config_queue.
    CONFIG_QUEUE_TIMEOUT = 120

    def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
        self.remote_path = None
        super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
        self.remote_prox_file_name = None
        self._prox_config_data = None
        self.additional_files = {}
        # Queue used to hand the parsed config to the traffic-runner process.
        self.config_queue = Queue()
        # allow_exit_without_flush
        self.config_queue.cancel_join_thread()
        self._global_section = None

    def prox_config_data(self):
        # Lazily fetched; blocks until another process publishes the parsed
        # config or CONFIG_QUEUE_TIMEOUT expires.
        if self._prox_config_data is None:
            # this will block, but it needs too
            self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT)
        return self._prox_config_data
    def global_section(self):
        # Cached lookup of the [global] section of the parsed PROX config.
        if self._global_section is None and self.prox_config_data:
            self._global_section = self.find_section("global")
        return self._global_section

    def find_section(self, name, default=_LOCAL_OBJECT):
        # The _LOCAL_OBJECT sentinel makes "no default supplied" raise.
        result = next((value for key, value in self.prox_config_data if key == name), default)
        if result is _LOCAL_OBJECT:
            raise KeyError('{} not found in Prox config'.format(name))

    def find_in_section(self, section_name, section_key, default=_LOCAL_OBJECT):
        # Like find_section, but one level deeper: look up a key inside a
        # named section.
        section = self.find_section(section_name, [])
        result = next((value for key, value in section if key == section_key), default)
        if result is _LOCAL_OBJECT:
            template = '{} not found in {} section of Prox config'
            raise KeyError(template.format(section_key, section_name))

    def copy_to_target(self, config_file_path, prox_file):
        # Upload a local file to /tmp on the target over SSH.
        remote_path = os.path.join("/tmp", prox_file)
        self.ssh_helper.put(config_file_path, remote_path)

    def _get_tx_port(section, sections):
        # Return the first numeric port id found in the section's
        # "tx port" entry.
        for item in sections[section]:
            if item[0] == "tx port":
                iface_port = re.findall(r'\d+', item[1])
                # do we want the last one?
                #  if yes, then can we reverse?
                return int(iface_port[0])

    def _replace_quoted_with_value(quoted, value, count=1):
        # Replace the first *count* double-quoted substrings with *value*.
        new_string = re.sub('"[^"]*"', '"{}"'.format(value), quoted, count)

    def _insert_additional_file(self, value):
        # Rewrite a double-quoted file name inside *value* to the uploaded
        # location recorded in self.additional_files.
        file_str = value.split('"')
        base_name = os.path.basename(file_str[1])
        file_str[1] = self.additional_files[base_name]
        return '"'.join(file_str)
    def generate_prox_config_file(self, config_path):
        # Parse the PROX .cfg and rewrite it for this deployment: force
        # hardware MACs on port sections and resolve @@dst_mac/@@src_mac
        # placeholders from the VNFD interface data.
        prox_config = ConfigParser(config_path, sections)

        # Ensure MAC is set "hardware"
        all_ports = self.vnfd_helper.port_pairs.all_ports
        # use dpdk port number
        for port_name in all_ports:
            port_num = self.vnfd_helper.port_num(port_name)
            port_section_name = "port {}".format(port_num)
            for section_name, section in sections:
                if port_section_name != section_name:

                for section_data in section:
                    if section_data[0] == "mac":
                        section_data[1] = "hardware"

        # Resolve MAC placeholders.  Four parallel stanzas handle the
        # combinations: value prefix "@@dst_mac"/"@@src_mac" (MAC written
        # space-separated) vs. key "dst mac"/"src mac" (MAC kept as-is).
        for _, section in sections:
            for section_data in section:
                item_key, item_val = section_data
                if item_val.startswith("@@dst_mac"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["dst_mac"]
                    section_data[1] = mac.replace(":", " ", 6)

                if item_key == "dst mac" and item_val.startswith("@@"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["dst_mac"]
                    section_data[1] = mac

                if item_val.startswith("@@src_mac"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["local_mac"]
                    section_data[1] = mac.replace(":", " ", 6)

                if item_key == "src mac" and item_val.startswith("@@"):
                    tx_port_iter = re.finditer(r'\d+', item_val)
                    tx_port_no = int(next(tx_port_iter).group(0))
                    intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
                    mac = intf["virtual-interface"]["local_mac"]
                    section_data[1] = mac

        # if addition file specified in prox config
        if not self.additional_files:

        # Point any "dofile(...)" references at the uploaded copies.
        for section_name, section in sections:
            for section_data in section:
                    if section_data[0].startswith("dofile"):
                        section_data[0] = self._insert_additional_file(section_data[0])

                    if section_data[1].startswith("dofile"):
                        section_data[1] = self._insert_additional_file(section_data[1])
                except:  # pylint: disable=bare-except
    def write_prox_lua(lua_config):
        """
        Write an .ini-format config file for PROX (parameters.lua)
        PROX does not allow a space before/after the =, so we need
        a custom serializer rather than ConfigParser's writer.
        """
        for key in lua_config:
            value = '"' + lua_config[key] + '"'
            if key == "__name__":
            if value is not None and value != '@':
                # Indent continuation lines of multi-line values with a tab.
                key = "=".join((key, str(value).replace('\n', '\n\t')))
                key = str(key).replace('\n', '\n\t')
        return os.linesep.join(out)

    def write_prox_config(prox_config):
        """
        Write an .ini-format config file for PROX
        PROX does not allow a space before/after the =, so we need
        a custom serializer rather than ConfigParser's writer.
        """
        for (section_name, section) in prox_config:
            out.append("[{}]".format(section_name))
            if key == "__name__":
            if value is not None and value != '@':
                # Indent continuation lines of multi-line values with a tab.
                key = "=".join((key, str(value).replace('\n', '\n\t')))
                key = str(key).replace('\n', '\n\t')
        return os.linesep.join(out)
    def put_string_to_file(self, s, remote_path):
        # Upload an in-memory string as a file on the target.
        file_obj = cStringIO(s)
        self.ssh_helper.put_file_obj(file_obj, remote_path)

    def generate_prox_lua_file(self):
        # Build the parameters.lua mapping: per-port tester (dst) and source
        # MACs taken from the VNFD virtual-interface definitions.
        all_ports = self.vnfd_helper.port_pairs.all_ports
        for port_name in all_ports:
            port_num = self.vnfd_helper.port_num(port_name)
            intf = self.vnfd_helper.find_interface(name=port_name)
            vintf = intf['virtual-interface']
            p["tester_mac{0}".format(port_num)] = vintf["dst_mac"]
            p["src_mac{0}".format(port_num)] = vintf["local_mac"]

    def upload_prox_lua(self, config_file, lua_data):
        # prox can't handle spaces around ' = ' so use custom method
        out = StringIO(self.write_prox_lua(lua_data))

        remote_path = os.path.join("/tmp", config_file)
        self.ssh_helper.put_file_obj(out, remote_path)

    def upload_prox_config(self, config_file, prox_config_data):
        # prox can't handle spaces around ' = ' so use custom method
        out = StringIO(self.write_prox_config(prox_config_data))

        remote_path = os.path.join("/tmp", config_file)
        self.ssh_helper.put_file_obj(out, remote_path)
    def build_config_file(self):
        # Resolve the PROX config path from the scenario options, upload any
        # auxiliary files, then generate and upload the rewritten config.
        task_path = self.scenario_helper.task_path
        options = self.scenario_helper.options
        config_path = options['prox_config']
        config_file = os.path.basename(config_path)
        config_path = utils.find_relative_file(config_path, task_path)
        self.additional_files = {}

        # Optionally generate and upload parameters.lua for the test.
        if options['prox_generate_parameter']:
                self.lua = self.generate_prox_lua_file()
                if len(self.lua) > 0:
                    self.upload_prox_lua("parameters.lua", self.lua)
            except:  # pylint: disable=bare-except

        # Upload any extra files referenced by the config; remember where
        # each one landed so references can be rewritten.
        prox_files = options.get('prox_files', [])
        if isinstance(prox_files, six.string_types):
            prox_files = [prox_files]
        for key_prox_file in prox_files:
            base_prox_file = os.path.basename(key_prox_file)
            key_prox_path = utils.find_relative_file(key_prox_file, task_path)
            remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file)
            self.additional_files[base_prox_file] = remote_prox_file

        self._prox_config_data = self.generate_prox_config_file(config_path)
        # copy config to queue so we can read it from traffic_runner process
        self.config_queue.put(self._prox_config_data)
        self.remote_path = self.upload_prox_config(config_file, self._prox_config_data)

    def build_config(self):
        # Build the config file, then assemble the PROX launch command line.
        self.build_config_file()

        options = self.scenario_helper.options
        prox_args = options['prox_args']
        tool_path = self.ssh_helper.join_bin_path(self.APP_NAME)

        self.pipeline_kwargs = {
            'tool_path': tool_path,
            'tool_dir': os.path.dirname(tool_path),
            'cfg_file': self.remote_path,
            # Flatten {flag: value} into "flag value" pairs; falsy values
            # render as a bare flag.
            'args': ' '.join(' '.join([str(k), str(v) if v else ''])
                             for k, v in prox_args.items())

        cmd_template = ("sudo bash -c 'cd {tool_dir}; {tool_path} -o cli "
                        "{args} -f {cfg_file} '")
        return cmd_template.format(**self.pipeline_kwargs)
# this might be bad, sometimes we want regular ResourceHelper methods, like collect_kpi
class ProxResourceHelper(ClientResourceHelper):

    RESOURCE_WORD = 'prox'

    def find_pci(pci, bound_pci):
        # we have to substring match PCI bus address from the end
        return any(b.endswith(pci) for b in bound_pci)

    def __init__(self, setup_helper):
        super(ProxResourceHelper, self).__init__(setup_helper)
        # Management interface credentials used to reach the PROX host.
        self.mgmt_interface = self.vnfd_helper.mgmt_interface
        self._user = self.mgmt_interface["user"]
        self._ip = self.mgmt_interface["ip"]

        self._vpci_to_if_name_map = None
        self.additional_file = {}
        self.remote_prox_file_name = None

        self.step_time = 0.5
        self._test_type = None
        # Baseline used by collect_live_stats for per-interval diffs.
        self.prev_multi_port = []

        # NOTE(review): the enclosing definition of the following line is
        # elided in this view of the file.
        self.client = self._connect()

    def test_type(self):
        # Lazily read the test name from the [global] 'name' entry of the
        # PROX config; cached after the first lookup.
        if self._test_type is None:
            self._test_type = self.setup_helper.find_in_section('global', 'name', None)
        return self._test_type
    def run_traffic(self, traffic_profile, *args):
        # Main traffic loop: initialize the profile, signal readiness, then
        # keep executing iterations until asked to terminate.
        self._queue.cancel_join_thread()
        traffic_profile.init(self._queue)
        # this frees up the run_traffic loop
        self.client_started.value = 1

        while not self._terminated.value:
            # move it all to traffic_profile
            self._run_traffic_once(traffic_profile)

    def _run_traffic_once(self, traffic_profile):
        # Execute one traffic iteration; when the profile reports done,
        # notify the queue and flag termination.
        traffic_profile.execute_traffic(self)
        if traffic_profile.done.is_set():
            self._queue.put({'done': True})
            LOG.debug("tg_prox done")
            self._terminated.value = 1

    # For VNF use ResourceHelper method to collect KPIs directly.
    # for TG leave the superclass ClientResourceHelper collect_kpi_method intact
    def collect_collectd_kpi(self):
        return self._collect_resource_kpi()

    def collect_live_stats(self):
        # Poll per-port counters and convert them to pps relative to the
        # previous poll (stored in self.prev_multi_port).
        for _, port_num in self.vnfd_helper.ports_iter():
            ports.append(port_num)

        ok, curr_port_stats = self.sut.multi_port_stats(ports)

        new_all_port_stats = \
            self.sut.multi_port_stats_diff(self.prev_multi_port, curr_port_stats, hz)

        self.prev_multi_port = curr_port_stats

        live_stats = self.sut.multi_port_stats_tuple(new_all_port_stats,
                                                     self.vnfd_helper.ports_iter())
        return True, live_stats

    def collect_kpi(self):
        result = super(ProxResourceHelper, self).collect_kpi()
        # add in collectd kpis manually
            result['collect_stats'] = self._collect_resource_kpi()

        ok, live_stats = self.collect_live_stats()
            result.update({'live_stats': live_stats})

    def terminate(self):
        # should not be called, use VNF terminate
        raise NotImplementedError()
1145 return self.sut # force connection
1147 def execute(self, cmd, *args, **kwargs):
1148 func = getattr(self.sut, cmd, None)
1150 return func(*args, **kwargs)
def _connect(self, client=None):
    """Run and connect to prox on the remote system.

    Retries for up to RETRY_SECONDS attempts, sleeping RETRY_INTERVAL
    between each, because a freshly restarted PROX may still be waiting
    for hugepages freed by the previous instance.

    :param client: optional pre-built ProxSocketHelper; created when None.
    :return: the connected client.
    :raises Exception: when no connection could be established in time.
    """
    # De-allocating a large amount of hugepages takes some time. If a new
    # PROX instance is started immediately after killing the previous one,
    # it might not be able to allocate hugepages, because they are still
    # being freed. Hence the -w switch.
    if client is None:
        client = ProxSocketHelper()

    # try connecting to Prox for 60s
    for _ in range(RETRY_SECONDS):
        time.sleep(RETRY_INTERVAL)
        try:
            client.connect(self._ip, PROX_PORT)
        except (socket.gaierror, socket.error):
            continue
        else:
            return client

    msg = "Failed to connect to prox, please check if system {} accepts connections on port {}"
    raise Exception(msg.format(self._ip, PROX_PORT))
class ProxDataHelper(object):
    """Collects and packages statistics for one PROX throughput run.

    Used as a context manager around a test: validates the port count on
    entry and builds the ProxTestDataTuple result on exit.
    """

    def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss, line_speed):
        super(ProxDataHelper, self).__init__()
        self.vnfd_helper = vnfd_helper
        self.sut = sut
        self.pkt_size = pkt_size
        self.value = value
        self.line_speed = line_speed
        self.tolerated_loss = tolerated_loss
        self.port_count = len(self.vnfd_helper.port_pairs.all_ports)
        # filled in during/after the measurement
        self.tsc_hz = None
        self.measured_stats = None
        self.latency = None
        self._totals_and_pps = None
        self.result_tuple = None

    @property
    def totals_and_pps(self):
        """Return (rx_total, tx_total, requested_pps), sampling PROX once.

        Retries the port-stats query until it succeeds or RETRY_TIMEOUT
        elapses; stays None when no valid sample could be taken.
        """
        if self._totals_and_pps is None:
            rx_total = tx_total = 0
            ok = False
            timeout = time.time() + constants.RETRY_TIMEOUT
            while not ok:
                ok, all_ports = self.sut.multi_port_stats([
                    self.vnfd_helper.port_num(port_name)
                    for port_name in self.vnfd_helper.port_pairs.all_ports])
                if time.time() > timeout:
                    break
            if ok:
                for port in all_ports:
                    rx_total = rx_total + port[1]
                    tx_total = tx_total + port[2]
                requested_pps = self.value / 100.0 * self.line_rate_to_pps()
                self._totals_and_pps = rx_total, tx_total, requested_pps
        return self._totals_and_pps

    @property
    def rx_total(self):
        try:
            ret_val = self.totals_and_pps[0]
        except (AttributeError, ValueError, TypeError, LookupError):
            ret_val = None
        return ret_val

    @property
    def tx_total(self):
        try:
            ret_val = self.totals_and_pps[1]
        except (AttributeError, ValueError, TypeError, LookupError):
            ret_val = None
        return ret_val

    @property
    def requested_pps(self):
        try:
            ret_val = self.totals_and_pps[2]
        except (AttributeError, ValueError, TypeError, LookupError):
            ret_val = None
        return ret_val

    @property
    def samples(self):
        """Per-port {name: {in_packets, out_packets}} snapshot."""
        samples = {}
        ports = []
        port_names = {}
        for port_name, port_num in self.vnfd_helper.ports_iter():
            ports.append(port_num)
            port_names[port_num] = port_name

        ok = False
        timeout = time.time() + constants.RETRY_TIMEOUT
        while not ok:
            ok, results = self.sut.multi_port_stats(ports)
            if time.time() > timeout:
                break
        if ok:
            for result in results:
                port_num = result[0]
                try:
                    samples[port_names[port_num]] = {
                        "in_packets": result[1],
                        "out_packets": result[2]}
                except (IndexError, KeyError):
                    pass
        return samples

    def __enter__(self):
        self.check_interface_count()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.make_tuple()

    def make_tuple(self):
        """Build the ProxTestDataTuple result exactly once and log it."""
        if self.result_tuple:
            return

        self.result_tuple = ProxTestDataTuple(
            self.tolerated_loss,
            self.tsc_hz,
            self.measured_stats['delta'].rx,
            self.measured_stats['delta'].tx,
            self.measured_stats['delta'].tsc,
            self.latency,
            self.rx_total,
            self.tx_total,
            self.requested_pps)
        self.result_tuple.log_data()

    @contextmanager
    def measure_tot_stats(self):
        # capture the SUT's total-stats delta for the duration of the body
        with self.sut.measure_tot_stats() as self.measured_stats:
            yield

    def check_interface_count(self):
        # do this assert in init? unless we expect interface count to
        # change from one run to another run...
        assert self.port_count in {1, 2, 4}, \
            "Invalid number of ports: 1, 2 or 4 ports only supported at this time"

    def capture_tsc_hz(self):
        """Record the SUT's TSC frequency (needed for PPS calculation)."""
        self.tsc_hz = float(self.sut.hz())

    def line_rate_to_pps(self):
        """Theoretical line rate in packets/s for the current packet size.

        The +20 accounts for per-packet framing overhead on the wire.
        """
        return self.port_count * self.line_speed / BITS_PER_BYTE / (self.pkt_size + 20)
class ProxProfileHelper(object):
    """Generic PROX profile helper: maps core roles from the PROX config
    and drives a single throughput test.

    Subclasses set ``__prox_profile_type__`` and override the traffic
    setup for specific scenarios (MPLS, BNG, vPE, lwAFTR). Unknown
    attribute access falls through to the wrapped resource helper.
    """

    __prox_profile_type__ = "Generic"

    PROX_CORE_GEN_MODE = "gen"
    PROX_CORE_LAT_MODE = "lat"

    @classmethod
    def get_cls(cls, helper_type):
        """Return class of specified type."""
        if not helper_type:
            return ProxProfileHelper

        for profile_helper_class in utils.itersubclasses(cls):
            if helper_type == profile_helper_class.__prox_profile_type__:
                return profile_helper_class

        # fall back to the generic helper for unknown types
        return ProxProfileHelper

    @classmethod
    def make_profile_helper(cls, resource_helper):
        """Instantiate the helper subclass matching the helper's test type."""
        return cls.get_cls(resource_helper.test_type)(resource_helper)

    def __init__(self, resource_helper):
        super(ProxProfileHelper, self).__init__()
        self.resource_helper = resource_helper
        self._cpu_topology = None
        self._test_cores = None
        self._latency_cores = None

    @property
    def cpu_topology(self):
        # lazily parsed from the remote /proc/cpuinfo
        if not self._cpu_topology:
            stdout = io.BytesIO()
            self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout)
            self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8'))
        return self._cpu_topology

    @property
    def test_cores(self):
        if not self._test_cores:
            self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE)
        return self._test_cores

    @property
    def latency_cores(self):
        if not self._latency_cores:
            self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
        return self._latency_cores

    @contextmanager
    def traffic_context(self, pkt_size, value):
        """Run generator cores at *value* percent with *pkt_size* packets,
        guaranteeing they are stopped on exit."""
        self.sut.stop_all()
        self.sut.reset_stats()
        try:
            self.sut.set_pkt_size(self.test_cores, pkt_size)
            self.sut.set_speed(self.test_cores, value)
            self.sut.start_all()
            yield
        finally:
            self.sut.stop_all()

    def get_cores(self, mode):
        """Return core ids whose [core...] config section sets 'mode' to *mode*."""
        cores = []

        for section_name, section in self.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            for key, value in section:
                if key == "mode" and value == mode:
                    core_tuple = CoreSocketTuple(section_name)
                    core = core_tuple.core_id
                    cores.append(core)

        return cores

    def pct_10gbps(self, percent, line_speed):
        """Get rate in percent of 10 Gbps.

        Returns the rate in percent of 10 Gbps.
        For instance 100.0 = 10 Gbps; 400.0 = 40 Gbps.

        This helper method is required when setting interface_speed option in
        the testcase because NSB/PROX considers 10Gbps as 100% of line rate,
        this means that the line rate must be expressed as a percentage of
        10Gbps.

        :param percent: (float) Percent of line rate (100.0 = line rate).
        :param line_speed: (int) line rate speed, in bits per second.

        :return: (float) Represents the rate in percent of 10Gbps.
        """
        return (percent * line_speed / (
            constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT))

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
                 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
        """Run one throughput test and return (result_tuple, samples)."""
        data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
                                     value, tolerated_loss, line_speed)

        with data_helper, self.traffic_context(pkt_size,
                                               self.pct_10gbps(value, line_speed)):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples

    def get_latency(self):
        """
        :return: return lat_min, lat_max, lat_avg
        :rtype: list
        """

        if not self._latency_cores:
            self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)

        if self._latency_cores:
            return self.sut.lat_stats(self._latency_cores)
        return []

    def terminate(self):
        # nothing to clean up; the resource helper owns the connection
        pass

    def __getattr__(self, item):
        # delegate unknown attributes (sut, vnfd_helper, setup_helper, ...)
        return getattr(self.resource_helper, item)
class ProxMplsProfileHelper(ProxProfileHelper):
    """Profile helper for the MPLS tag/untag test.

    Splits generator cores into a tagged set and a plain (udp) set; plain
    packets are 4 bytes smaller (no MPLS label), so their speed is scaled
    to keep wire bandwidth equal on both sides.
    """

    __prox_profile_type__ = "MPLS tag/untag"

    def __init__(self, resource_helper):
        super(ProxMplsProfileHelper, self).__init__(resource_helper)
        self._cores_tuple = None

    @property
    def mpls_cores(self):
        # lazily parsed (tagged, plain) core lists
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_mpls()
        return self._cores_tuple

    @property
    def tagged_cores(self):
        return self.mpls_cores[0]

    @property
    def plain_cores(self):
        return self.mpls_cores[1]

    def get_cores_mpls(self):
        """Parse generator core sections into (cores_tagged, cores_plain)."""
        cores_tagged = []
        cores_plain = []
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # skip cores that are not in generator mode
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("tag"):
                    core_tuple = CoreSocketTuple(section_name)
                    core_tag = core_tuple.core_id
                    cores_tagged.append(core_tag)

                elif item_value.startswith("udp"):
                    core_tuple = CoreSocketTuple(section_name)
                    core_udp = core_tuple.core_id
                    cores_plain.append(core_udp)

        return cores_tagged, cores_plain

    @contextmanager
    def traffic_context(self, pkt_size, value):
        """Run tagged and plain generator cores, stopping them on exit."""
        self.sut.stop_all()
        self.sut.reset_stats()
        try:
            self.sut.set_pkt_size(self.tagged_cores, pkt_size)
            self.sut.set_pkt_size(self.plain_cores, pkt_size - 4)
            self.sut.set_speed(self.tagged_cores, value)
            # equalize wire bandwidth: plain packets are 4 bytes smaller
            ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
            self.sut.set_speed(self.plain_cores, value * ratio)
            self.sut.start_all()
            yield
        finally:
            self.sut.stop_all()
class ProxBngProfileHelper(ProxProfileHelper):
    """Profile helper for the BNG generator test.

    Drives CPE-side and inet-side generator cores plus ARP initialization
    traffic, ramping both directions step-wise up to the target rate.
    """

    __prox_profile_type__ = "BNG gen"

    def __init__(self, resource_helper):
        super(ProxBngProfileHelper, self).__init__(resource_helper)
        self._cores_tuple = None
        # NOTE(review): ramp-up step size/interval reconstructed from a
        # gap in the mangled source -- confirm against upstream.
        self.step_delta = 5
        self.step_time = 0.5

    @property
    def bng_cores(self):
        # lazily parsed (cpe, inet, arp, arp_task) core lists
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_gen_bng_qos()
        return self._cores_tuple

    @property
    def cpe_cores(self):
        return self.bng_cores[0]

    @property
    def inet_cores(self):
        return self.bng_cores[1]

    @property
    def arp_cores(self):
        return self.bng_cores[2]

    @property
    def arp_task_cores(self):
        return self.bng_cores[3]

    @property
    def all_rx_cores(self):
        return self.latency_cores

    def get_cores_gen_bng_qos(self):
        """Parse generator core sections into the four BNG core roles.

        :return: (cpe_cores, inet_cores, arp_cores, arp_tasks_core).
        """
        cpe_cores = []
        inet_cores = []
        arp_cores = []
        arp_tasks_core = [0]
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # skip cores that are not in generator mode
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    core_tuple = CoreSocketTuple(section_name)
                    cpe_core = core_tuple.core_id
                    cpe_cores.append(cpe_core)

                elif item_value.startswith("inet"):
                    core_tuple = CoreSocketTuple(section_name)
                    inet_core = core_tuple.core_id
                    inet_cores.append(inet_core)

                elif item_value.startswith("arp"):
                    core_tuple = CoreSocketTuple(section_name)
                    arp_core = core_tuple.core_id
                    arp_cores.append(arp_core)

                # We check the tasks/core separately
                if item_value.startswith("arp_task"):
                    core_tuple = CoreSocketTuple(section_name)
                    arp_task_core = core_tuple.core_id
                    arp_tasks_core.append(arp_task_core)

        return cpe_cores, inet_cores, arp_cores, arp_tasks_core

    @contextmanager
    def traffic_context(self, pkt_size, value):
        # Tester is sending packets at the required speed already after
        # setup_test(). Just get the current statistics, sleep the required
        # amount of time and calculate packet loss.
        inet_pkt_size = pkt_size
        cpe_pkt_size = pkt_size - 24
        ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)

        curr_up_speed = curr_down_speed = 0
        max_up_speed = max_down_speed = value
        if ratio < 1:
            max_down_speed = value * ratio
        else:
            max_up_speed = value / ratio

        # Initialize cores
        self.sut.stop_all()
        time.sleep(0.5)

        # Flush any packets in the NIC RX buffers, otherwise the stats will be
        # wrong.
        self.sut.start(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.stop(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.reset_stats()

        self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
        self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)

        self.sut.reset_values(self.cpe_cores)
        self.sut.reset_values(self.inet_cores)

        # Set correct IP and UDP lengths in packet headers
        # CPE:
        # IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
        self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
        # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
        self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)

        # INET:
        # IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
        self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
        # IP length (byte 48): 50 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), CRC(4)
        self.sut.set_value(self.inet_cores, 48, inet_pkt_size - 50, 2)
        # UDP length (byte 70): 70 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), IP(20), CRC(4)
        self.sut.set_value(self.inet_cores, 70, inet_pkt_size - 70, 2)

        # Sending ARP to initialize tables - need a few seconds of generation
        # to make sure all CPEs are initialized
        LOG.info("Initializing SUT: sending ARP packets")
        self.sut.set_speed(self.arp_cores, 1, self.arp_task_cores)
        self.sut.set_speed(self.inet_cores, curr_up_speed)
        self.sut.set_speed(self.cpe_cores, curr_down_speed)
        self.sut.start(self.arp_cores)
        time.sleep(4)

        # Ramp up the transmission speed. First go to the common speed, then
        # increase steps for the faster one.
        self.sut.start(self.cpe_cores + self.inet_cores + self.latency_cores)

        LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)

        while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
            # The min(..., ...) takes care of 1) floating point rounding errors
            # that could make curr_*_speed to be slightly greater than
            # max_*_speed and 2) max_*_speed not being an exact multiple of
            # self.step_delta.
            if curr_up_speed < max_up_speed:
                curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
            if curr_down_speed < max_down_speed:
                curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)

            self.sut.set_speed(self.inet_cores, curr_up_speed)
            self.sut.set_speed(self.cpe_cores, curr_down_speed)
            time.sleep(self.step_time)

        LOG.info("Target speeds reached. Starting real test.")

        yield

        self.sut.stop(self.arp_cores + self.cpe_cores + self.inet_cores)
        LOG.info("Test ended. Flushing NIC buffers")
        self.sut.start(self.all_rx_cores)
        time.sleep(3)
        self.sut.stop(self.all_rx_cores)

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
                 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
        """Run one BNG throughput test and return (result_tuple, samples)."""
        data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
                                     value, tolerated_loss, line_speed)

        with data_helper, self.traffic_context(pkt_size,
                                               self.pct_10gbps(value, line_speed)):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples
class ProxVpeProfileHelper(ProxProfileHelper):
    """Profile helper for the vPE generator test.

    Drives QinQ (CPE-side) and MPLS (inet-side) generator cores, scaling
    speeds so both directions use equal wire bandwidth, then ramps the
    rates step-wise up to the target.
    """

    __prox_profile_type__ = "vPE gen"

    def __init__(self, resource_helper):
        super(ProxVpeProfileHelper, self).__init__(resource_helper)
        self._cores_tuple = None
        self._ports_tuple = None
        # NOTE(review): ramp-up step size/interval reconstructed from a
        # gap in the mangled source -- confirm against upstream.
        self.step_delta = 5
        self.step_time = 0.5

    @property
    def vpe_cores(self):
        # lazily parsed (cpe, inet) core lists
        if not self._cores_tuple:
            self._cores_tuple = self.get_cores_gen_vpe()
        return self._cores_tuple

    @property
    def cpe_cores(self):
        return self.vpe_cores[0]

    @property
    def inet_cores(self):
        return self.vpe_cores[1]

    @property
    def all_rx_cores(self):
        return self.latency_cores

    @property
    def vpe_ports(self):
        # lazily parsed (cpe, inet) port lists
        if not self._ports_tuple:
            self._ports_tuple = self.get_ports_gen_vpe()
        return self._ports_tuple

    @property
    def cpe_ports(self):
        return self.vpe_ports[0]

    @property
    def inet_ports(self):
        return self.vpe_ports[1]

    def get_cores_gen_vpe(self):
        """Parse generator core sections into (cpe_cores, inet_cores)."""
        cpe_cores = []
        inet_cores = []
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # skip cores that are not in generator mode
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    core_tuple = CoreSocketTuple(section_name)
                    cpe_core = core_tuple.core_id
                    cpe_cores.append(cpe_core)

                elif item_value.startswith("inet"):
                    core_tuple = CoreSocketTuple(section_name)
                    inet_core = core_tuple.core_id
                    inet_cores.append(inet_core)

        return cpe_cores, inet_cores

    def get_ports_gen_vpe(self):
        """Parse port sections into (cpe_ports, inet_ports)."""
        cpe_ports = []
        inet_ports = []

        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("port"):
                continue
            # port number is embedded in the section name, e.g. "port 0"
            tx_port_iter = re.finditer(r'\d+', section_name)
            tx_port_no = int(next(tx_port_iter).group(0))

            for item_key, item_value in section:
                if item_key != 'name':
                    continue

                if item_value.startswith("cpe"):
                    cpe_ports.append(tx_port_no)

                elif item_value.startswith("inet"):
                    inet_ports.append(tx_port_no)

        return cpe_ports, inet_ports

    @contextmanager
    def traffic_context(self, pkt_size, value):
        # Calculate the target upload and download speed. The upload and
        # download packets have different packet sizes, so in order to get
        # equal bandwidth usage, the ratio of the speeds has to match the ratio
        # of the packet sizes.
        cpe_pkt_size = pkt_size
        inet_pkt_size = pkt_size - 4
        ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)

        curr_up_speed = curr_down_speed = 0
        max_up_speed = max_down_speed = value
        if ratio < 1:
            max_down_speed = value * ratio
        else:
            max_up_speed = value / ratio

        # Adjust speed when multiple cores per port are used to generate traffic
        if len(self.cpe_ports) != len(self.cpe_cores):
            max_down_speed *= 1.0 * len(self.cpe_ports) / len(self.cpe_cores)
        if len(self.inet_ports) != len(self.inet_cores):
            max_up_speed *= 1.0 * len(self.inet_ports) / len(self.inet_cores)

        # Initialize cores
        self.sut.stop_all()
        time.sleep(0.5)

        # Flush any packets in the NIC RX buffers, otherwise the stats will be
        # wrong.
        self.sut.start(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.stop(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.reset_stats()

        self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
        self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)

        self.sut.reset_values(self.cpe_cores)
        self.sut.reset_values(self.inet_cores)

        # Set correct IP and UDP lengths in packet headers
        # CPE: IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
        self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
        # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
        self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)

        # INET: IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
        self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
        # UDP length (byte 42): 42 for MAC(12), EthType(2), MPLS(4), IP(20), CRC(4)
        self.sut.set_value(self.inet_cores, 42, inet_pkt_size - 42, 2)

        self.sut.set_speed(self.inet_cores, curr_up_speed)
        self.sut.set_speed(self.cpe_cores, curr_down_speed)

        # Ramp up the transmission speed. First go to the common speed, then
        # increase steps for the faster one.
        self.sut.start(self.cpe_cores + self.inet_cores + self.all_rx_cores)

        LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)

        while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
            # The min(..., ...) takes care of 1) floating point rounding errors
            # that could make curr_*_speed to be slightly greater than
            # max_*_speed and 2) max_*_speed not being an exact multiple of
            # self.step_delta.
            if curr_up_speed < max_up_speed:
                curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
            if curr_down_speed < max_down_speed:
                curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)

            self.sut.set_speed(self.inet_cores, curr_up_speed)
            self.sut.set_speed(self.cpe_cores, curr_down_speed)
            time.sleep(self.step_time)

        LOG.info("Target speeds reached. Starting real test.")

        yield

        self.sut.stop(self.cpe_cores + self.inet_cores)
        LOG.info("Test ended. Flushing NIC buffers")
        self.sut.start(self.all_rx_cores)
        time.sleep(3)
        self.sut.stop(self.all_rx_cores)

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
                 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
        """Run one vPE throughput test and return (result_tuple, samples)."""
        data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
                                     value, tolerated_loss, line_speed)

        with data_helper, self.traffic_context(pkt_size,
                                               self.pct_10gbps(value, line_speed)):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples
class ProxlwAFTRProfileHelper(ProxProfileHelper):
    """Profile helper for the lwAFTR generator test.

    Drives IPv6 tunnel (tun/lwB4) and IPv4 (inet) generator cores; tunnel
    packets carry 40 extra bytes of IPv6 encapsulation, so the inet speed
    is scaled to keep wire bandwidth equal, then both directions are
    ramped step-wise to the target rate.
    """

    __prox_profile_type__ = "lwAFTR gen"

    def __init__(self, resource_helper):
        super(ProxlwAFTRProfileHelper, self).__init__(resource_helper)
        self._cores_tuple = None
        self._ports_tuple = None
        # NOTE(review): step_delta reconstructed from a gap in the mangled
        # source (step_time survived) -- confirm against upstream.
        self.step_delta = 5
        self.step_time = 0.5

    @property
    def _lwaftr_cores(self):
        # lazily parsed (tun, inet) core lists
        if not self._cores_tuple:
            self._cores_tuple = self._get_cores_gen_lwaftr()
        return self._cores_tuple

    @property
    def tun_cores(self):
        return self._lwaftr_cores[0]

    @property
    def inet_cores(self):
        return self._lwaftr_cores[1]

    @property
    def _lwaftr_ports(self):
        # lazily parsed (tun, inet) port lists
        if not self._ports_tuple:
            self._ports_tuple = self._get_ports_gen_lw_aftr()
        return self._ports_tuple

    @property
    def tun_ports(self):
        return self._lwaftr_ports[0]

    @property
    def inet_ports(self):
        return self._lwaftr_ports[1]

    @property
    def all_rx_cores(self):
        return self.latency_cores

    def _get_cores_gen_lwaftr(self):
        """Parse generator core sections into (tun_cores, inet_cores)."""
        tun_cores = []
        inet_cores = []
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            if not section_name.startswith("core"):
                continue

            # skip cores that are not in generator mode
            if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
                continue

            core_tuple = CoreSocketTuple(section_name)
            core_tag = core_tuple.core_id
            for item_value in (v for k, v in section if k == 'name'):
                if item_value.startswith('tun'):
                    tun_cores.append(core_tag)
                elif item_value.startswith('inet'):
                    inet_cores.append(core_tag)

        return tun_cores, inet_cores

    def _get_ports_gen_lw_aftr(self):
        """Parse port sections into (tun_ports, inet_ports)."""
        tun_ports = []
        inet_ports = []

        re_port = re.compile(r'port (\d+)')
        for section_name, section in self.resource_helper.setup_helper.prox_config_data:
            match = re_port.search(section_name)
            if not match:
                continue

            tx_port_no = int(match.group(1))
            for item_value in (v for k, v in section if k == 'name'):
                if item_value.startswith('lwB4'):
                    tun_ports.append(tx_port_no)
                elif item_value.startswith('inet'):
                    inet_ports.append(tx_port_no)

        return tun_ports, inet_ports

    @staticmethod
    def _resize(len1, len2):
        """Return the scale factor len1/len2 (1.0 when the counts match)."""
        if len1 == len2:
            return 1.0
        return 1.0 * len1 / len2

    @contextmanager
    def traffic_context(self, pkt_size, value):
        # Tester is sending packets at the required speed already after
        # setup_test(). Just get the current statistics, sleep the required
        # amount of time and calculate packet loss.
        tun_pkt_size = pkt_size
        inet_pkt_size = pkt_size - 40
        ratio = 1.0 * (tun_pkt_size + 20) / (inet_pkt_size + 20)

        curr_up_speed = curr_down_speed = 0
        max_up_speed = max_down_speed = value

        # tunnel packets are always larger, so scale the inet side down
        max_up_speed = value / ratio

        # Adjust speed when multiple cores per port are used to generate traffic
        if len(self.tun_ports) != len(self.tun_cores):
            max_down_speed *= self._resize(len(self.tun_ports), len(self.tun_cores))
        if len(self.inet_ports) != len(self.inet_cores):
            max_up_speed *= self._resize(len(self.inet_ports), len(self.inet_cores))

        # Initialize cores
        self.sut.stop_all()
        time.sleep(0.5)

        # Flush any packets in the NIC RX buffers, otherwise the stats will be
        # wrong.
        self.sut.start(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.stop(self.all_rx_cores)
        time.sleep(0.5)
        self.sut.reset_stats()

        self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
        self.sut.set_pkt_size(self.tun_cores, tun_pkt_size)

        self.sut.reset_values(self.tun_cores)
        self.sut.reset_values(self.inet_cores)

        # Set correct IP and UDP lengths in packet headers
        # TUN:
        # IPv6 length (byte 18): 58 for MAC(12), EthType(2), IPv6(40) , CRC(4)
        self.sut.set_value(self.tun_cores, 18, tun_pkt_size - 58, 2)
        # IP length (byte 56): 58 for MAC(12), EthType(2), CRC(4)
        self.sut.set_value(self.tun_cores, 56, tun_pkt_size - 58, 2)
        # UDP length (byte 78): 78 for MAC(12), EthType(2), IP(20), UDP(8), CRC(4)
        self.sut.set_value(self.tun_cores, 78, tun_pkt_size - 78, 2)

        # INET:
        # IP length (byte 20): 22 for MAC(12), EthType(2), CRC(4)
        self.sut.set_value(self.inet_cores, 16, inet_pkt_size - 18, 2)
        # UDP length (byte 42): 42 for MAC(12), EthType(2), IP(20), UPD(8), CRC(4)
        self.sut.set_value(self.inet_cores, 38, inet_pkt_size - 38, 2)

        LOG.info("Initializing SUT: sending lwAFTR packets")
        self.sut.set_speed(self.inet_cores, curr_up_speed)
        self.sut.set_speed(self.tun_cores, curr_down_speed)
        time.sleep(4)

        # Ramp up the transmission speed. First go to the common speed, then
        # increase steps for the faster one.
        self.sut.start(self.tun_cores + self.inet_cores + self.latency_cores)

        LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)

        while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
            # The min(..., ...) takes care of 1) floating point rounding errors
            # that could make curr_*_speed to be slightly greater than
            # max_*_speed and 2) max_*_speed not being an exact multiple of
            # self.step_delta.
            if curr_up_speed < max_up_speed:
                curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
            if curr_down_speed < max_down_speed:
                curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)

            self.sut.set_speed(self.inet_cores, curr_up_speed)
            self.sut.set_speed(self.tun_cores, curr_down_speed)
            time.sleep(self.step_time)

        LOG.info("Target speeds reached. Starting real test.")

        yield

        self.sut.stop(self.tun_cores + self.inet_cores)
        LOG.info("Test ended. Flushing NIC buffers")
        self.sut.start(self.all_rx_cores)
        time.sleep(3)
        self.sut.stop(self.all_rx_cores)

    def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
                 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
        """Run one lwAFTR throughput test and return (result_tuple, samples)."""
        data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
                                     value, tolerated_loss, line_speed)

        with data_helper, self.traffic_context(pkt_size,
                                               self.pct_10gbps(value, line_speed)):
            with data_helper.measure_tot_stats():
                time.sleep(duration)
                # Getting statistics to calculate PPS at right speed....
                data_helper.capture_tsc_hz()
                data_helper.latency = self.get_latency()

        return data_helper.result_tuple, data_helper.samples