1 # Copyright (c) 2017 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
import array
import io
import logging
import operator
import os
import re
import select
import socket
import time
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from itertools import repeat, chain
from multiprocessing import Queue

import six
from six.moves import cStringIO
from six.moves import zip, StringIO

from yardstick.common import utils
from yardstick.common.utils import SocketTopology, join_non_strings, try_int
from yardstick.network_services.helpers.iniparser import ConfigParser
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
from yardstick.network_services import constants
45 LOG = logging.getLogger(__name__)
46 LOG.setLevel(logging.DEBUG)
47 LOG_RESULT = logging.getLogger('yardstick')
48 LOG_RESULT.setLevel(logging.DEBUG)
54 CONFIGURATION_OPTIONS = (
55 # dict key section key default value
56 ('pktSizes', 'general', 'pkt_sizes', '64,128,256,512,1024,1280,1518'),
57 ('testDuration', 'general', 'test_duration', 5.0),
58 ('testPrecision', 'general', 'test_precision', 1.0),
59 ('tests', 'general', 'tests', None),
60 ('toleratedLoss', 'general', 'tolerated_loss', 0.0),
62 ('logFile', 'logging', 'file', 'dats.log'),
63 ('logDateFormat', 'logging', 'datefmt', None),
64 ('logLevel', 'logging', 'level', 'INFO'),
65 ('logOverwrite', 'logging', 'overwrite', 1),
67 ('testerIp', 'tester', 'ip', None),
68 ('testerUser', 'tester', 'user', 'root'),
69 ('testerDpdkDir', 'tester', 'rte_sdk', '/root/dpdk'),
70 ('testerDpdkTgt', 'tester', 'rte_target', 'x86_64-native-linuxapp-gcc'),
71 ('testerProxDir', 'tester', 'prox_dir', '/root/prox'),
72 ('testerSocketId', 'tester', 'socket_id', 0),
74 ('sutIp', 'sut', 'ip', None),
75 ('sutUser', 'sut', 'user', 'root'),
76 ('sutDpdkDir', 'sut', 'rte_sdk', '/root/dpdk'),
77 ('sutDpdkTgt', 'sut', 'rte_target', 'x86_64-native-linuxapp-gcc'),
78 ('sutProxDir', 'sut', 'prox_dir', '/root/prox'),
79 ('sutSocketId', 'sut', 'socket_id', 0),
83 class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')):
84 CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$")
86 def __new__(cls, *args):
88 matches = cls.CORE_RE.search(str(args[0]))
90 args = matches.groups()
92 return super(CoreSocketTuple, cls).__new__(cls, int(args[0]), try_int(args[1], 0),
93 'h' if args[2] else '')
95 except (AttributeError, TypeError, IndexError, ValueError):
96 raise ValueError('Invalid core spec {}'.format(args))
98 def is_hyperthread(self):
99 return self.hyperthread == 'h'
103 return int(self.is_hyperthread())
105 def find_in_topology(self, cpu_topology):
107 socket_core_match = cpu_topology[self.socket_id][self.core_id]
108 sorted_match = sorted(socket_core_match.values())
109 return sorted_match[self.index][0]
110 except (KeyError, IndexError):
111 template = "Core {}{} on socket {} does not exist"
112 raise ValueError(template.format(self.core_id, self.hyperthread, self.socket_id))
115 class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')):
116 def __new__(cls, *args):
118 assert args[0] is not str(args[0])
119 args = tuple(args[0])
120 except (AssertionError, IndexError, TypeError):
123 return super(TotStatsTuple, cls).__new__(cls, *args)
126 class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,'
127 'delta_tx,delta_tsc,'
128 'latency,rx_total,tx_total,'
133 return 1e2 * self.drop_total / float(self.tx_total)
134 except ZeroDivisionError:
139 # calculate the effective throughput in Mpps
140 return float(self.delta_tx) * self.tsc_hz / self.delta_tsc / 1e6
144 # calculate the effective throughput in Mpps
145 return float(self.delta_rx) * self.tsc_hz / self.delta_tsc / 1e6
148 def can_be_lost(self):
149 return int(self.tx_total * self.tolerated / 1e2)
152 def drop_total(self):
153 return self.tx_total - self.rx_total
157 return self.drop_total <= self.can_be_lost
159 def get_samples(self, pkt_size, pkt_loss=None, port_samples=None):
161 pkt_loss = self.pkt_loss
163 if port_samples is None:
173 "Throughput": self.rx_mpps,
174 "RxThroughput": self.rx_mpps,
175 "DropPackets": pkt_loss,
176 "CurrentDropPackets": pkt_loss,
177 "RequestedTxThroughput": self.requested_pps / 1e6,
178 "TxThroughput": self.tx_mpps,
182 samples.update(port_samples)
184 samples.update((key, value) for key, value in zip(latency_keys, self.latency))
187 def log_data(self, logger=None):
191 template = "RX: %d; TX: %d; dropped: %d (tolerated: %d)"
192 logger.info(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
193 logger.info("Mpps configured: %f; Mpps generated %f; Mpps received %f",
194 self.requested_pps / 1e6, self.tx_mpps, self.rx_mpps)
197 class PacketDump(object):
199 def assert_func(func, value1, value2, template=None):
200 assert func(value1, value2), template.format(value1, value2)
202 def __init__(self, port_id, data_len, payload):
203 template = "Packet dump has specified length {}, but payload is {} bytes long"
204 self.assert_func(operator.eq, data_len, len(payload), template)
205 self._port_id = port_id
206 self._data_len = data_len
207 self._payload = payload
211 """Get the port id of the packet dump"""
216 """Get the length of the data received"""
217 return self._data_len
220 return '<PacketDump port: {} payload: {}>'.format(self._port_id, self._payload)
222 def payload(self, start=None, end=None):
223 """Get part of the payload as a list of ordinals.
225 Returns a list of byte values, matching the contents of the packet dump.
226 Optional start and end parameters can be specified to retrieve only a
227 part of the packet contents.
229 The number of elements in the list is equal to end - start + 1, so end
230 is the offset of the last character.
233 start (pos. int): the starting offset in the payload. If it is not
234 specified or None, offset 0 is assumed.
235 end (pos. int): the ending offset of the payload. If it is not
236 specified or None, the contents until the end of the packet are
240 [int, int, ...]. Each int represents the ordinal value of a byte in
247 end = self.data_len - 1
249 # Bounds checking on offsets
250 template = "Start offset must be non-negative"
251 self.assert_func(operator.ge, start, 0, template)
253 template = "End offset must be less than {1}"
254 self.assert_func(operator.lt, end, self.data_len, template)
256 # Adjust for splice operation: end offset must be 1 more than the offset
257 # of the last desired character.
260 return self._payload[start:end]
263 class ProxSocketHelper(object):
265 def __init__(self, sock=None):
266 """ creates new prox instance """
267 super(ProxSocketHelper, self).__init__()
270 sock = socket.socket()
274 self.master_stats = None
276 def connect(self, ip, port):
277 """Connect to the prox instance on the remote system"""
278 self._sock.connect((ip, port))
280 def get_socket(self):
281 """ get the socket connected to the remote instance """
284 def _parse_socket_data(self, decoded_data, pkt_dump_only):
285 def get_newline_index():
286 return decoded_data.find('\n', index)
290 for newline_index in iter(get_newline_index, -1):
291 ret_str = decoded_data[index:newline_index]
294 mode, port_id, data_len = ret_str.split(',', 2)
296 mode, port_id, data_len = None, None, None
298 if mode != 'pktdump':
299 # Regular 1-line message. Stop reading from the socket.
300 LOG.debug("Regular response read")
303 LOG.debug("Packet dump header read: [%s]", ret_str)
305 # The line is a packet dump header. Parse it, read the
306 # packet payload, store the dump for later retrieval.
307 # Skip over the packet dump and continue processing: a
308 # 1-line response may follow the packet dump.
310 data_len = int(data_len)
311 data_start = newline_index + 1 # + 1 to skip over \n
312 data_end = data_start + data_len
313 sub_data = decoded_data[data_start:data_end]
314 pkt_payload = array.array('B', (ord(v) for v in sub_data))
315 pkt_dump = PacketDump(int(port_id), data_len, pkt_payload)
316 self._pkt_dumps.append(pkt_dump)
319 # Return boolean instead of string to signal
320 # successful reception of the packet dump.
321 LOG.debug("Packet dump stored, returning")
326 return ret_str, False
328 def get_data(self, pkt_dump_only=False, timeout=0.01):
329 """ read data from the socket """
331 # This method behaves slightly differently depending on whether it is
332 # called to read the response to a command (pkt_dump_only = 0) or if
333 # it is called specifically to read a packet dump (pkt_dump_only = 1).
335 # Packet dumps look like:
336 # pktdump,<port_id>,<data_len>\n
337 # <packet contents as byte array>\n
338 # This means the total packet dump message consists of 2 lines instead
341 # - Response for a command (pkt_dump_only = 0):
342 # 1) Read response from the socket until \n (end of message)
343 # 2a) If the response is a packet dump header (starts with "pktdump,"):
344 # - Read the packet payload and store the packet dump for later
346 # - Reset the state and restart from 1). Eventually state 2b) will
347 # be reached and the function will return.
348 # 2b) If the response is not a packet dump:
349 # - Return the received message as a string
351 # - Explicit request to read a packet dump (pkt_dump_only = 1):
352 # - Read the dump header and payload
353 # - Store the packet dump for later retrieval
354 # - Return True to signify a packet dump was successfully read
357 # recv() is blocking, so avoid calling it when no data is waiting.
358 ready = select.select([self._sock], [], [], timeout)
359 return bool(ready[0])
363 for status in iter(is_ready, False):
364 decoded_data = self._sock.recv(256).decode('utf-8')
365 ret_str, done = self._parse_socket_data(decoded_data, pkt_dump_only)
369 LOG.debug("Received data from socket: [%s]", ret_str)
370 return ret_str if status else ''
372 def put_command(self, to_send):
373 """ send data to the remote instance """
374 LOG.debug("Sending data to socket: [%s]", to_send.rstrip('\n'))
376 # NOTE: sendall will block, we need a timeout
377 self._sock.sendall(to_send.encode('utf-8'))
378 except: # pylint: disable=bare-except
381 def get_packet_dump(self):
382 """ get the next packet dump """
384 return self._pkt_dumps.pop(0)
387 def stop_all_reset(self):
388 """ stop the remote instance and reset stats """
389 LOG.debug("Stop all and reset stats")
394 """ stop all cores on the remote instance """
395 LOG.debug("Stop all")
396 self.put_command("stop all\n")
399 def stop(self, cores, task=''):
400 """ stop specific cores on the remote instance """
404 if core not in tmpcores:
405 tmpcores.append(core)
407 LOG.debug("Stopping cores %s", tmpcores)
408 self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task))
412 """ start all cores on the remote instance """
413 LOG.debug("Start all")
414 self.put_command("start all\n")
416 def start(self, cores):
417 """ start specific cores on the remote instance """
421 if core not in tmpcores:
422 tmpcores.append(core)
424 LOG.debug("Starting cores %s", tmpcores)
425 self.put_command("start {}\n".format(join_non_strings(',', tmpcores)))
428 def reset_stats(self):
429 """ reset the statistics on the remote instance """
430 LOG.debug("Reset stats")
431 self.put_command("reset stats\n")
434 def _run_template_over_cores(self, template, cores, *args):
436 self.put_command(template.format(core, *args))
438 def set_pkt_size(self, cores, pkt_size):
439 """ set the packet size to generate on the remote instance """
440 LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size)
442 self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size)
445 def set_value(self, cores, offset, value, length):
446 """ set value on the remote instance """
447 msg = "Set value for core(s) %s to '%s' (length %d), offset %d"
448 LOG.debug(msg, cores, value, length, offset)
449 template = "set value {} 0 {} {} {}\n"
450 self._run_template_over_cores(template, cores, offset, value, length)
452 def reset_values(self, cores):
453 """ reset values on the remote instance """
454 LOG.debug("Set value for core(s) %s", cores)
455 self._run_template_over_cores("reset values {} 0\n", cores)
457 def set_speed(self, cores, speed, tasks=None):
458 """ set speed on the remote instance """
460 tasks = [0] * len(cores)
461 elif len(tasks) != len(cores):
462 LOG.error("set_speed: cores and tasks must have the same len")
463 LOG.debug("Set speed for core(s)/tasks(s) %s to %g", list(zip(cores, tasks)), speed)
464 for (core, task) in list(zip(cores, tasks)):
465 self.put_command("speed {} {} {}\n".format(core, task, speed))
467 def slope_speed(self, cores_speed, duration, n_steps=0):
468 """will start to increase speed from 0 to N where N is taken from
469 a['speed'] for each a in cores_speed"""
470 # by default, each step will take 0.5 sec
472 n_steps = duration * 2
474 private_core_data = []
475 step_duration = float(duration) / n_steps
476 for core_data in cores_speed:
477 target = float(core_data['speed'])
478 private_core_data.append({
479 'cores': core_data['cores'],
481 'delta': target / n_steps,
486 deltas_keys_iter = repeat(('current', 'delta'), n_steps - 1)
487 for key1, key2 in chain(deltas_keys_iter, [('zero', 'speed')]):
488 time.sleep(step_duration)
489 for core_data in private_core_data:
490 core_data['current'] = core_data[key1] + core_data[key2]
491 self.set_speed(core_data['cores'], core_data['current'])
493 def set_pps(self, cores, pps, pkt_size,
494 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
495 """ set packets per second for specific cores on the remote instance """
496 msg = "Set packets per sec for core(s) %s to %g%% of line rate (packet size: %d)"
497 LOG.debug(msg, cores, pps, pkt_size)
499 # speed in percent of line-rate
500 speed = float(pps) * (pkt_size + 20) / line_speed / BITS_PER_BYTE
501 self._run_template_over_cores("speed {} 0 {}\n", cores, speed)
503 def lat_stats(self, cores, task=0):
504 """Get the latency statistics from the remote system"""
505 # 1-based index, if max core is 4, then 0, 1, 2, 3, 4 len = 5
510 self.put_command("lat stats {} {} \n".format(core, task))
511 ret = self.get_data()
514 lat_min[core], lat_max[core], lat_avg[core] = \
515 tuple(int(n) for n in ret.split(",")[:3])
517 except (AttributeError, ValueError, TypeError):
520 return lat_min, lat_max, lat_avg
522 def get_all_tot_stats(self):
523 self.put_command("tot stats\n")
524 all_stats_str = self.get_data().split(",")
525 if len(all_stats_str) != 4:
528 all_stats = TotStatsTuple(int(v) for v in all_stats_str)
529 self.master_stats = all_stats
533 return self.get_all_tot_stats()[3]
535 def core_stats(self, cores, task=0):
536 """Get the receive statistics from the remote system"""
537 rx = tx = drop = tsc = 0
539 self.put_command("core stats {} {}\n".format(core, task))
540 ret = self.get_data().split(",")
545 return rx, tx, drop, tsc
547 def multi_port_stats(self, ports):
548 """get counter values from all ports port"""
552 ports_str = ports_str + str(port) + ","
553 ports_str = ports_str[:-1]
556 tot_result = [0] * len(ports)
560 while (len(ports) is not len(ports_all_data)) and (retry_counter < 10):
561 self.put_command("multi port stats {}\n".format(ports_str))
562 ports_all_data = self.get_data().split(";")
564 if len(ports) is len(ports_all_data):
565 for port_data_str in ports_all_data:
568 tot_result[port_index] = [try_int(s, 0) for s in port_data_str.split(",")]
569 except (IndexError, TypeError):
570 LOG.error("Port Index error %d %s - retrying ", port_index, port_data_str)
572 if (len(tot_result[port_index]) is not 6) or \
573 tot_result[port_index][0] is not ports[port_index]:
575 tot_result = [0] * len(ports)
578 LOG.error("Corrupted PACKET %s - retrying", port_data_str)
581 port_index = port_index + 1
583 LOG.error("Empty / too much data - retry -%s-", ports_all_data)
585 tot_result = [0] * len(ports)
589 retry_counter = retry_counter + 1
592 def port_stats(self, ports):
593 """get counter values from a specific port"""
594 tot_result = [0] * 12
596 self.put_command("port_stats {}\n".format(port))
597 ret = [try_int(s, 0) for s in self.get_data().split(",")]
598 tot_result = [sum(x) for x in zip(tot_result, ret)]
602 def measure_tot_stats(self):
603 start = self.get_all_tot_stats()
604 container = {'start_tot': start}
608 container['end_tot'] = end = self.get_all_tot_stats()
610 container['delta'] = TotStatsTuple(e - s for s, e in zip(start, end))
613 """Get the total statistics from the remote system"""
614 stats = self.get_all_tot_stats()
617 def tot_ierrors(self):
618 """Get the total ierrors from the remote system"""
619 self.put_command("tot ierrors tot\n")
620 recv = self.get_data().split(',')
621 tot_ierrors = int(recv[0])
623 return tot_ierrors, tsc
625 def set_count(self, count, cores):
626 """Set the number of packets to send on the specified core"""
627 self._run_template_over_cores("count {} 0 {}\n", cores, count)
629 def dump_rx(self, core_id, task_id=0, count=1):
630 """Activate dump on rx on the specified core"""
631 LOG.debug("Activating dump on RX for core %d, task %d, count %d", core_id, task_id, count)
632 self.put_command("dump_rx {} {} {}\n".format(core_id, task_id, count))
633 time.sleep(1.5) # Give PROX time to set up packet dumping
641 """ stop all cores on the remote instance """
642 LOG.debug("Quit prox")
643 self.put_command("quit\n")
646 def force_quit(self):
647 """ stop all cores on the remote instance """
648 LOG.debug("Force Quit prox")
649 self.put_command("quit_force\n")
653 _LOCAL_OBJECT = object()
656 class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
657 # the actual app is lowercase
659 # not used for Prox but added for consistency
662 LUA_PARAMETER_NAME = ""
663 LUA_PARAMETER_PEER = {
668 CONFIG_QUEUE_TIMEOUT = 120
670 def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
671 self.remote_path = None
672 super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
673 self.remote_prox_file_name = None
674 self._prox_config_data = None
675 self.additional_files = {}
676 self.config_queue = Queue()
677 # allow_exit_without_flush
678 self.config_queue.cancel_join_thread()
679 self._global_section = None
682 def prox_config_data(self):
683 if self._prox_config_data is None:
684 # this will block, but it needs too
685 self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT)
686 return self._prox_config_data
689 def global_section(self):
690 if self._global_section is None and self.prox_config_data:
691 self._global_section = self.find_section("global")
692 return self._global_section
694 def find_section(self, name, default=_LOCAL_OBJECT):
695 result = next((value for key, value in self.prox_config_data if key == name), default)
696 if result is _LOCAL_OBJECT:
697 raise KeyError('{} not found in Prox config'.format(name))
700 def find_in_section(self, section_name, section_key, default=_LOCAL_OBJECT):
701 section = self.find_section(section_name, [])
702 result = next((value for key, value in section if key == section_key), default)
703 if result is _LOCAL_OBJECT:
704 template = '{} not found in {} section of Prox config'
705 raise KeyError(template.format(section_key, section_name))
708 def copy_to_target(self, config_file_path, prox_file):
709 remote_path = os.path.join("/tmp", prox_file)
710 self.ssh_helper.put(config_file_path, remote_path)
714 def _get_tx_port(section, sections):
716 for item in sections[section]:
717 if item[0] == "tx port":
718 iface_port = re.findall(r'\d+', item[1])
719 # do we want the last one?
720 # if yes, then can we reverse?
721 return int(iface_port[0])
724 def _replace_quoted_with_value(quoted, value, count=1):
725 new_string = re.sub('"[^"]*"', '"{}"'.format(value), quoted, count)
728 def _insert_additional_file(self, value):
729 file_str = value.split('"')
730 base_name = os.path.basename(file_str[1])
731 file_str[1] = self.additional_files[base_name]
732 return '"'.join(file_str)
734 def generate_prox_config_file(self, config_path):
736 prox_config = ConfigParser(config_path, sections)
739 # Ensure MAC is set "hardware"
740 all_ports = self.vnfd_helper.port_pairs.all_ports
741 # use dpdk port number
742 for port_name in all_ports:
743 port_num = self.vnfd_helper.port_num(port_name)
744 port_section_name = "port {}".format(port_num)
745 for section_name, section in sections:
746 if port_section_name != section_name:
749 for section_data in section:
750 if section_data[0] == "mac":
751 section_data[1] = "hardware"
754 for _, section in sections:
755 for section_data in section:
756 item_key, item_val = section_data
757 if item_val.startswith("@@dst_mac"):
758 tx_port_iter = re.finditer(r'\d+', item_val)
759 tx_port_no = int(next(tx_port_iter).group(0))
760 intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
761 mac = intf["virtual-interface"]["dst_mac"]
762 section_data[1] = mac.replace(":", " ", 6)
764 if item_key == "dst mac" and item_val.startswith("@@"):
765 tx_port_iter = re.finditer(r'\d+', item_val)
766 tx_port_no = int(next(tx_port_iter).group(0))
767 intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
768 mac = intf["virtual-interface"]["dst_mac"]
769 section_data[1] = mac
771 if item_val.startswith("@@src_mac"):
772 tx_port_iter = re.finditer(r'\d+', item_val)
773 tx_port_no = int(next(tx_port_iter).group(0))
774 intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
775 mac = intf["virtual-interface"]["local_mac"]
776 section_data[1] = mac.replace(":", " ", 6)
778 if item_key == "src mac" and item_val.startswith("@@"):
779 tx_port_iter = re.finditer(r'\d+', item_val)
780 tx_port_no = int(next(tx_port_iter).group(0))
781 intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
782 mac = intf["virtual-interface"]["local_mac"]
783 section_data[1] = mac
785 # if addition file specified in prox config
786 if not self.additional_files:
789 for section_name, section in sections:
790 for section_data in section:
792 if section_data[0].startswith("dofile"):
793 section_data[0] = self._insert_additional_file(section_data[0])
795 if section_data[1].startswith("dofile"):
796 section_data[1] = self._insert_additional_file(section_data[1])
797 except: # pylint: disable=bare-except
803 def write_prox_lua(lua_config):
805 Write an .ini-format config file for PROX (parameters.lua)
806 PROX does not allow a space before/after the =, so we need
810 for key in lua_config:
811 value = '"' + lua_config[key] + '"'
812 if key == "__name__":
814 if value is not None and value != '@':
815 key = "=".join((key, str(value).replace('\n', '\n\t')))
818 key = str(key).replace('\n', '\n\t')
820 return os.linesep.join(out)
823 def write_prox_config(prox_config):
825 Write an .ini-format config file for PROX
826 PROX does not allow a space before/after the =, so we need
830 for (section_name, section) in prox_config:
831 out.append("[{}]".format(section_name))
834 if key == "__name__":
836 if value is not None and value != '@':
837 key = "=".join((key, str(value).replace('\n', '\n\t')))
840 key = str(key).replace('\n', '\n\t')
842 return os.linesep.join(out)
844 def put_string_to_file(self, s, remote_path):
845 file_obj = cStringIO(s)
846 self.ssh_helper.put_file_obj(file_obj, remote_path)
849 def generate_prox_lua_file(self):
851 all_ports = self.vnfd_helper.port_pairs.all_ports
852 for port_name in all_ports:
853 port_num = self.vnfd_helper.port_num(port_name)
854 intf = self.vnfd_helper.find_interface(name=port_name)
855 vintf = intf['virtual-interface']
856 p["tester_mac{0}".format(port_num)] = vintf["dst_mac"]
857 p["src_mac{0}".format(port_num)] = vintf["local_mac"]
861 def upload_prox_lua(self, config_file, lua_data):
862 # prox can't handle spaces around ' = ' so use custom method
863 out = StringIO(self.write_prox_lua(lua_data))
865 remote_path = os.path.join("/tmp", config_file)
866 self.ssh_helper.put_file_obj(out, remote_path)
870 def upload_prox_config(self, config_file, prox_config_data):
871 # prox can't handle spaces around ' = ' so use custom method
872 out = StringIO(self.write_prox_config(prox_config_data))
874 remote_path = os.path.join("/tmp", config_file)
875 self.ssh_helper.put_file_obj(out, remote_path)
879 def build_config_file(self):
880 task_path = self.scenario_helper.task_path
881 options = self.scenario_helper.options
882 config_path = options['prox_config']
883 config_file = os.path.basename(config_path)
884 config_path = utils.find_relative_file(config_path, task_path)
885 self.additional_files = {}
888 if options['prox_generate_parameter']:
890 self.lua = self.generate_prox_lua_file()
891 if len(self.lua) > 0:
892 self.upload_prox_lua("parameters.lua", self.lua)
893 except: # pylint: disable=bare-except
896 prox_files = options.get('prox_files', [])
897 if isinstance(prox_files, six.string_types):
898 prox_files = [prox_files]
899 for key_prox_file in prox_files:
900 base_prox_file = os.path.basename(key_prox_file)
901 key_prox_path = utils.find_relative_file(key_prox_file, task_path)
902 remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file)
903 self.additional_files[base_prox_file] = remote_prox_file
905 self._prox_config_data = self.generate_prox_config_file(config_path)
906 # copy config to queue so we can read it from traffic_runner process
907 self.config_queue.put(self._prox_config_data)
908 self.remote_path = self.upload_prox_config(config_file, self._prox_config_data)
910 def build_config(self):
911 self.build_config_file()
913 options = self.scenario_helper.options
914 prox_args = options['prox_args']
915 tool_path = self.ssh_helper.join_bin_path(self.APP_NAME)
917 self.pipeline_kwargs = {
918 'tool_path': tool_path,
919 'tool_dir': os.path.dirname(tool_path),
920 'cfg_file': self.remote_path,
921 'args': ' '.join(' '.join([str(k), str(v) if v else ''])
922 for k, v in prox_args.items())
925 cmd_template = ("sudo bash -c 'cd {tool_dir}; {tool_path} -o cli "
926 "{args} -f {cfg_file} '")
927 return cmd_template.format(**self.pipeline_kwargs)
930 # this might be bad, sometimes we want regular ResourceHelper methods, like collect_kpi
931 class ProxResourceHelper(ClientResourceHelper):
933 RESOURCE_WORD = 'prox'
940 def find_pci(pci, bound_pci):
941 # we have to substring match PCI bus address from the end
942 return any(b.endswith(pci) for b in bound_pci)
944 def __init__(self, setup_helper):
945 super(ProxResourceHelper, self).__init__(setup_helper)
946 self.mgmt_interface = self.vnfd_helper.mgmt_interface
947 self._user = self.mgmt_interface["user"]
948 self._ip = self.mgmt_interface["ip"]
951 self._vpci_to_if_name_map = None
952 self.additional_file = {}
953 self.remote_prox_file_name = None
958 self._test_type = None
963 self.client = self._connect()
968 if self._test_type is None:
969 self._test_type = self.setup_helper.find_in_section('global', 'name', None)
970 return self._test_type
972 def run_traffic(self, traffic_profile, *args):
973 self._queue.cancel_join_thread()
977 traffic_profile.init(self._queue)
978 # this frees up the run_traffic loop
979 self.client_started.value = 1
981 while not self._terminated.value:
982 # move it all to traffic_profile
983 self._run_traffic_once(traffic_profile)
985 def _run_traffic_once(self, traffic_profile):
986 traffic_profile.execute_traffic(self)
987 if traffic_profile.done.is_set():
988 self._queue.put({'done': True})
989 LOG.debug("tg_prox done")
990 self._terminated.value = 1
992 # For VNF use ResourceHelper method to collect KPIs directly.
993 # for TG leave the superclass ClientResourceHelper collect_kpi_method intact
994 def collect_collectd_kpi(self):
995 return self._collect_resource_kpi()
997 def collect_kpi(self):
998 result = super(ProxResourceHelper, self).collect_kpi()
999 # add in collectd kpis manually
1001 result['collect_stats'] = self._collect_resource_kpi()
1004 def terminate(self):
1005 # should not be called, use VNF terminate
1006 raise NotImplementedError()
1009 return self.sut # force connection
1011 def execute(self, cmd, *args, **kwargs):
1012 func = getattr(self.sut, cmd, None)
1014 return func(*args, **kwargs)
1017 def _connect(self, client=None):
1018 """Run and connect to prox on the remote system """
1019 # De-allocating a large amount of hugepages takes some time. If a new
1020 # PROX instance is started immediately after killing the previous one,
1021 # it might not be able to allocate hugepages, because they are still
1022 # being freed. Hence the -w switch.
1023 # self.connection.execute("sudo killall -w Prox 2>/dev/null")
1024 # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t
1025 # -f ./handle_none-4.cfg"
1026 # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir +
1028 # + "export RTE_TARGET=" + self._dpdk_target + ";" \
1029 # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50;
1031 # + "./build/Prox " + prox_args
1032 # log.debug("Starting PROX with command [%s]", prox_cmd)
1033 # thread.start_new_thread(self.ssh_check_quit, (self, self._user,
1034 # self._ip, prox_cmd))
1036 client = ProxSocketHelper()
1038 # try connecting to Prox for 60s
1039 for _ in range(RETRY_SECONDS):
1040 time.sleep(RETRY_INTERVAL)
1042 client.connect(self._ip, PROX_PORT)
1043 except (socket.gaierror, socket.error):
1048 msg = "Failed to connect to prox, please check if system {} accepts connections on port {}"
1049 raise Exception(msg.format(self._ip, PROX_PORT))
1052 class ProxDataHelper(object):
1054 def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss, line_speed):
1055 super(ProxDataHelper, self).__init__()
1056 self.vnfd_helper = vnfd_helper
1058 self.pkt_size = pkt_size
1060 self.line_speed = line_speed
1061 self.tolerated_loss = tolerated_loss
1062 self.port_count = len(self.vnfd_helper.port_pairs.all_ports)
1064 self.measured_stats = None
1066 self._totals_and_pps = None
1067 self.result_tuple = None
1070 def totals_and_pps(self):
1071 if self._totals_and_pps is None:
1072 rx_total = tx_total = 0
1073 all_ports = self.sut.multi_port_stats(range(self.port_count))
1074 for port in all_ports:
1075 rx_total = rx_total + port[1]
1076 tx_total = tx_total + port[2]
1077 requested_pps = self.value / 100.0 * self.line_rate_to_pps()
1078 self._totals_and_pps = rx_total, tx_total, requested_pps
1079 return self._totals_and_pps
1083 return self.totals_and_pps[0]
1087 return self.totals_and_pps[1]
1090 def requested_pps(self):
1091 return self.totals_and_pps[2]
1098 for port_name, port_num in self.vnfd_helper.ports_iter():
1099 ports.append(port_num)
1100 port_names.append(port_name)
1102 results = self.sut.multi_port_stats(ports)
1103 for result in results:
1104 port_num = result[0]
1105 samples[port_names[port_num]] = {
1106 "in_packets": result[1],
1107 "out_packets": result[2]}
1110 def __enter__(self):
1111 self.check_interface_count()
1114 def __exit__(self, exc_type, exc_val, exc_tb):
1117 def make_tuple(self):
1118 if self.result_tuple:
1121 self.result_tuple = ProxTestDataTuple(
1122 self.tolerated_loss,
1124 self.measured_stats['delta'].rx,
1125 self.measured_stats['delta'].tx,
1126 self.measured_stats['delta'].tsc,
1132 self.result_tuple.log_data()
1135 def measure_tot_stats(self):
1136 with self.sut.measure_tot_stats() as self.measured_stats:
1139 def check_interface_count(self):
1140 # do this assert in init? unless we expect interface count to
1141 # change from one run to another run...
1142 assert self.port_count in {1, 2, 4}, \
1143 "Invalid number of ports: 1, 2 or 4 ports only supported at this time"
# Capture the SUT's TSC frequency (Hz), used to convert TSC cycle deltas
# into elapsed time when computing PPS.
1145 def capture_tsc_hz(self):
1146 self.tsc_hz = float(self.sut.hz())
# Theoretical aggregate line rate in packets per second:
# bits/sec -> bytes/sec (BITS_PER_BYTE), divided by the on-wire frame size.
# The +20 is the per-frame Ethernet wire overhead (preamble 7, SFD 1,
# inter-frame gap 12) on top of pkt_size — standard line-rate arithmetic.
1148 def line_rate_to_pps(self):
1149 return self.port_count * self.line_speed / BITS_PER_BYTE / (self.pkt_size + 20)
# Base helper for running a PROX traffic profile. Subclasses override
# __prox_profile_type__ and traffic_context()/run_test() for specific
# traffic shapes; get_cls() dispatches on that marker string.
# NOTE(review): the embedded original line numbers jump throughout this
# listing, so decorator lines (@classmethod, @property, @contextmanager),
# `continue` statements, and some returns are missing — hedged notes below.
1151 class ProxProfileHelper(object):
1153 __prox_profile_type__ = "Generic"
# Mode strings matched against the PROX config to classify cores.
1155 PROX_CORE_GEN_MODE = "gen"
1156 PROX_CORE_LAT_MODE = "lat"
# Takes `cls` — presumably @classmethod (decorator line missing, 1156→1159).
# Also missing: the guard line before the first `return ProxProfileHelper`
# (presumably `if helper_type is None:`, numbering jumps 1160→1162).
1159 def get_cls(cls, helper_type):
1160 """Return class of specified type."""
1162 return ProxProfileHelper
# Scan all subclasses for a matching __prox_profile_type__ marker.
1164 for profile_helper_class in utils.itersubclasses(cls):
1165 if helper_type == profile_helper_class.__prox_profile_type__:
1166 return profile_helper_class
# Fall back to the generic helper when no subclass matches.
1168 return ProxProfileHelper
# Factory: build the right helper for resource_helper.test_type.
# Presumably @classmethod (decorator line missing, 1168→1171).
1171 def make_profile_helper(cls, resource_helper):
1172 return cls.get_cls(resource_helper.test_type)(resource_helper)
1174 def __init__(self, resource_helper):
1175 super(ProxProfileHelper, self).__init__()
# All unknown attribute lookups are delegated to this (see __getattr__).
1176 self.resource_helper = resource_helper
# Lazy caches for the properties below.
1177 self._cpu_topology = None
1178 self._test_cores = None
1179 self._latency_cores = None
# Parse /proc/cpuinfo from the SUT once; presumably @property (1179→1182).
1182 def cpu_topology(self):
1183 if not self._cpu_topology:
1184 stdout = io.BytesIO()
1185 self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout)
1186 self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8'))
1187 return self._cpu_topology
# Cores running in "gen" mode; presumably @property (1187→1190).
1190 def test_cores(self):
1191 if not self._test_cores:
1192 self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE)
1193 return self._test_cores
# Cores running in "lat" mode; presumably @property (1193→1196).
1196 def latency_cores(self):
1197 if not self._latency_cores:
1198 self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
1199 return self._latency_cores
# Used as `with self.traffic_context(...)` in run_test, so presumably
# @contextmanager; the decorator and the `yield`/cleanup lines are missing
# from this listing (numbering jumps 1199→1202 and 1208→1214).
1202 def traffic_context(self, pkt_size, value):
1204 self.sut.reset_stats()
1206 self.sut.set_pkt_size(self.test_cores, pkt_size)
1207 self.sut.set_speed(self.test_cores, value)
1208 self.sut.start_all()
# Collect the core ids whose PROX config section declares the given mode.
# NOTE(review): the `cores = []` init, per-section `continue`, the
# `cores.append(core)` and `return cores` lines are missing from this
# listing (numbering jumps 1214→1217, 1218→1221, 1224→1229).
1214 def get_cores(self, mode):
1217 for section_name, section in self.setup_helper.prox_config_data:
1218 if not section_name.startswith("core"):
1221 for key, value in section:
1222 if key == "mode" and value == mode:
1223 core_tuple = CoreSocketTuple(section_name)
1224 core = core_tuple.core_id
1229 def pct_10gbps(self, percent, line_speed):
1230 """Get rate in percent of 10 Gbps.
1232 Returns the rate in percent of 10 Gbps.
1233 For instance 100.0 = 10 Gbps; 400.0 = 40 Gbps.
1235 This helper method is required when setting interface_speed option in
1236 the testcase because NSB/PROX considers 10Gbps as 100% of line rate,
1237 this means that the line rate must be expressed as a percentage of
1240 :param percent: (float) Percent of line rate (100.0 = line rate).
1241 :param line_speed: (int) line rate speed, in bits per second.
1243 :return: (float) Represents the rate in percent of 10Gbps.
# NOTE(review): the docstring continuation after "percentage of" and the
# closing quotes are missing from this listing (gaps at 1238-1239, 1244).
1245 return (percent * line_speed / (
1246 constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT))
# Run one measurement: start traffic at `value` percent of line rate,
# sleep `duration`, and return (ProxTestDataTuple, per-port samples).
1248 def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
1249 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
1250 data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
1251 value, tolerated_loss, line_speed)
1253 with data_helper, self.traffic_context(pkt_size,
1254 self.pct_10gbps(value, line_speed)):
1255 with data_helper.measure_tot_stats():
1256 time.sleep(duration)
1257 # Getting statistics to calculate PPS at right speed....
1258 data_helper.capture_tsc_hz()
1259 data_helper.latency = self.get_latency()
1261 return data_helper.result_tuple, data_helper.samples
1263 def get_latency(self):
1265 :return: return lat_min, lat_max, lat_avg
# NOTE(review): the docstring quote lines and the fallback return for the
# no-latency-cores case are missing (numbering jumps 1263→1265→1269,
# 1273→1276); upstream presumably returns an empty list.
1269 if not self._latency_cores:
1270 self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
1272 if self._latency_cores:
1273 return self.sut.lat_stats(self._latency_cores)
# Body missing from this listing (1276→1279); presumably stops the SUT.
1276 def terminate(self):
# Delegate unknown attributes (sut, setup_helper, vnfd_helper, ...) to the
# wrapped resource_helper.
1279 def __getattr__(self, item):
1280 return getattr(self.resource_helper, item)
# Profile helper for the MPLS tag/untag test: gen cores are split into
# "tag*" (tagged, full-size packets) and "udp*" (plain, 4 bytes smaller).
# NOTE(review): embedded line numbers jump in this listing; @property
# decorators, `continue` statements, and the traffic_context
# yield/cleanup are among the missing lines — hedged notes below.
1283 class ProxMplsProfileHelper(ProxProfileHelper):
1285 __prox_profile_type__ = "MPLS tag/untag"
1287 def __init__(self, resource_helper):
1288 super(ProxMplsProfileHelper, self).__init__(resource_helper)
# Lazy cache for (tagged_cores, plain_cores).
1289 self._cores_tuple = None
# Presumably @property (decorator line missing, 1289→1292).
1292 def mpls_cores(self):
1293 if not self._cores_tuple:
1294 self._cores_tuple = self.get_cores_mpls()
1295 return self._cores_tuple
# Presumably @property: cores generating MPLS-tagged traffic.
1298 def tagged_cores(self):
1299 return self.mpls_cores[0]
# Presumably @property: cores generating plain UDP traffic.
1302 def plain_cores(self):
1303 return self.mpls_cores[1]
# Classify gen-mode cores by their "name" option ("tag*" vs "udp*").
# NOTE(review): list initializations and the `continue` lines after the
# section filters are missing (numbering jumps 1305→1308, 1309→1312,
# 1312→1315, 1316→1319).
1305 def get_cores_mpls(self):
1308 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1309 if not section_name.startswith("core"):
# Skip sections that are not generator cores.
1312 if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
1315 for item_key, item_value in section:
1316 if item_key != 'name':
1319 if item_value.startswith("tag"):
1320 core_tuple = CoreSocketTuple(section_name)
1321 core_tag = core_tuple.core_id
1322 cores_tagged.append(core_tag)
1324 elif item_value.startswith("udp"):
1325 core_tuple = CoreSocketTuple(section_name)
1326 core_udp = core_tuple.core_id
1327 cores_plain.append(core_udp)
1329 return cores_tagged, cores_plain
# Presumably @contextmanager (used via `with` in run_test); decorator and
# yield/stop lines are missing (numbering jumps 1329→1332, after 1341).
1332 def traffic_context(self, pkt_size, value):
1334 self.sut.reset_stats()
# Plain packets are 4 bytes smaller: the MPLS label is stripped.
1336 self.sut.set_pkt_size(self.tagged_cores, pkt_size)
1337 self.sut.set_pkt_size(self.plain_cores, pkt_size - 4)
1338 self.sut.set_speed(self.tagged_cores, value)
# Scale the plain-core rate so both directions use equal wire bandwidth
# (+20 = per-frame Ethernet wire overhead).
1339 ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
1340 self.sut.set_speed(self.plain_cores, value * ratio)
1341 self.sut.start_all()
# Profile helper for the BNG (Broadband Network Gateway) generator test:
# gen cores are classified as cpe / inet / arp (plus arp task ids), traffic
# is ramped up stepwise to the target speed before the measurement window.
# NOTE(review): embedded line numbers jump in this listing; @property
# decorators, ratio guard conditionals, sleeps and the context-manager
# yield are among the missing lines — hedged notes below.
1348 class ProxBngProfileHelper(ProxProfileHelper):
1350 __prox_profile_type__ = "BNG gen"
1352 def __init__(self, resource_helper):
1353 super(ProxBngProfileHelper, self).__init__(resource_helper)
# Lazy cache for (cpe_cores, inet_cores, arp_cores, arp_task_cores).
1354 self._cores_tuple = None
# Presumably @property (decorator missing, 1354→1357).
1357 def bng_cores(self):
1358 if not self._cores_tuple:
1359 self._cores_tuple = self.get_cores_gen_bng_qos()
1360 return self._cores_tuple
# Presumably @property accessors over the cached tuple.
1363 def cpe_cores(self):
1364 return self.bng_cores[0]
1367 def inet_cores(self):
1368 return self.bng_cores[1]
1371 def arp_cores(self):
1372 return self.bng_cores[2]
1375 def arp_task_cores(self):
1376 return self.bng_cores[3]
# Presumably @property: cores used to drain NIC RX buffers.
1379 def all_rx_cores(self):
1380 return self.latency_cores
# Classify gen-mode cores by their "name" option (cpe/inet/arp[_task]).
# NOTE(review): list initializations for cpe/inet/arp cores and the
# `continue` lines after the filters are missing (numbering jumps
# 1382→1386, 1388→1391, 1391→1394, 1395→1398).
1382 def get_cores_gen_bng_qos(self):
# Task 0 is implicitly present for the arp cores.
1386 arp_tasks_core = [0]
1387 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1388 if not section_name.startswith("core"):
# Skip sections that are not generator cores.
1391 if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
1394 for item_key, item_value in section:
1395 if item_key != 'name':
1398 if item_value.startswith("cpe"):
1399 core_tuple = CoreSocketTuple(section_name)
1400 cpe_core = core_tuple.core_id
1401 cpe_cores.append(cpe_core)
1403 elif item_value.startswith("inet"):
1404 core_tuple = CoreSocketTuple(section_name)
1405 inet_core = core_tuple.core_id
1406 inet_cores.append(inet_core)
1408 elif item_value.startswith("arp"):
1409 core_tuple = CoreSocketTuple(section_name)
1410 arp_core = core_tuple.core_id
1411 arp_cores.append(arp_core)
1413 # We check the tasks/core separately
1414 if item_value.startswith("arp_task"):
1415 core_tuple = CoreSocketTuple(section_name)
1416 arp_task_core = core_tuple.core_id
1417 arp_tasks_core.append(arp_task_core)
1419 return cpe_cores, inet_cores, arp_cores, arp_tasks_core
# Presumably @contextmanager (used via `with` in run_test); decorator line
# missing (1419→1422).
1422 def traffic_context(self, pkt_size, value):
1423 # Tester is sending packets at the required speed already after
1424 # setup_test(). Just get the current statistics, sleep the required
1425 # amount of time and calculate packet loss.
1426 inet_pkt_size = pkt_size
# CPE side is 24 bytes smaller (QinQ vs MPLS+IP+GRE+IP encapsulation).
1427 cpe_pkt_size = pkt_size - 24
1428 ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)
1430 curr_up_speed = curr_down_speed = 0
1431 max_up_speed = max_down_speed = value
# NOTE(review): the conditional guards around these two assignments
# (presumably `if ratio < 1:` / `elif ratio > 1:`) are missing from this
# listing (numbering jumps 1431→1433→1435); as printed both would run.
1433 max_down_speed = value * ratio
1435 max_up_speed = value / ratio
1441 # Flush any packets in the NIC RX buffers, otherwise the stats will be
1443 self.sut.start(self.all_rx_cores)
# NOTE(review): a sleep between start/stop is missing (1443→1445).
1445 self.sut.stop(self.all_rx_cores)
1447 self.sut.reset_stats()
1449 self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
1450 self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)
1452 self.sut.reset_values(self.cpe_cores)
1453 self.sut.reset_values(self.inet_cores)
1455 # Set correct IP and UDP lengths in packet headers
1457 # IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
1458 self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
1459 # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
1460 self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)
1463 # IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
1464 self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
1465 # IP length (byte 48): 50 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), CRC(4)
1466 self.sut.set_value(self.inet_cores, 48, inet_pkt_size - 50, 2)
1467 # UDP length (byte 70): 70 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), IP(20), CRC(4)
1468 self.sut.set_value(self.inet_cores, 70, inet_pkt_size - 70, 2)
1470 # Sending ARP to initialize tables - need a few seconds of generation
1471 # to make sure all CPEs are initialized
1472 LOG.info("Initializing SUT: sending ARP packets")
1473 self.sut.set_speed(self.arp_cores, 1, self.arp_task_cores)
1474 self.sut.set_speed(self.inet_cores, curr_up_speed)
1475 self.sut.set_speed(self.cpe_cores, curr_down_speed)
1476 self.sut.start(self.arp_cores)
# NOTE(review): the settle sleep after starting ARP is missing (1476→1479).
1479 # Ramp up the transmission speed. First go to the common speed, then
1480 # increase steps for the faster one.
1481 self.sut.start(self.cpe_cores + self.inet_cores + self.latency_cores)
1483 LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)
1485 while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
1486 # The min(..., ...) takes care of 1) floating point rounding errors
1487 # that could make curr_*_speed to be slightly greater than
1488 # max_*_speed and 2) max_*_speed not being an exact multiple of
1490 if curr_up_speed < max_up_speed:
1491 curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
1492 if curr_down_speed < max_down_speed:
1493 curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)
1495 self.sut.set_speed(self.inet_cores, curr_up_speed)
1496 self.sut.set_speed(self.cpe_cores, curr_down_speed)
1497 time.sleep(self.step_time)
1499 LOG.info("Target speeds reached. Starting real test.")
# NOTE(review): the `yield` marking the measurement window is missing
# between the log above and the stop below (numbering jumps 1499→1503).
1503 self.sut.stop(self.arp_cores + self.cpe_cores + self.inet_cores)
1504 LOG.info("Test ended. Flushing NIC buffers")
1505 self.sut.start(self.all_rx_cores)
# NOTE(review): the flush sleep between start/stop is missing (1505→1507).
1507 self.sut.stop(self.all_rx_cores)
# Same shape as the base-class run_test: ramp (via traffic_context above),
# measure for `duration`, return (ProxTestDataTuple, per-port samples).
1509 def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
1510 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
1511 data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
1512 value, tolerated_loss, line_speed)
1514 with data_helper, self.traffic_context(pkt_size,
1515 self.pct_10gbps(value, line_speed)):
1516 with data_helper.measure_tot_stats():
1517 time.sleep(duration)
1518 # Getting statistics to calculate PPS at right speed....
1519 data_helper.capture_tsc_hz()
1520 data_helper.latency = self.get_latency()
1522 return data_helper.result_tuple, data_helper.samples
# Profile helper for the vPE (virtual Provider Edge) generator test:
# gen cores and tx ports are classified as cpe vs inet, and upload/download
# speeds are balanced so both directions consume equal wire bandwidth.
# NOTE(review): embedded line numbers jump in this listing; @property
# decorators, `continue` statements, ratio guards and the context-manager
# yield are among the missing lines — hedged notes below.
1525 class ProxVpeProfileHelper(ProxProfileHelper):
1527 __prox_profile_type__ = "vPE gen"
1529 def __init__(self, resource_helper):
1530 super(ProxVpeProfileHelper, self).__init__(resource_helper)
# Lazy caches for (cpe_cores, inet_cores) and (cpe_ports, inet_ports).
1531 self._cores_tuple = None
1532 self._ports_tuple = None
# Presumably @property (decorator missing, 1532→1535).
1535 def vpe_cores(self):
1536 if not self._cores_tuple:
1537 self._cores_tuple = self.get_cores_gen_vpe()
1538 return self._cores_tuple
# Presumably @property accessors over the cached core tuple.
1541 def cpe_cores(self):
1542 return self.vpe_cores[0]
1545 def inet_cores(self):
1546 return self.vpe_cores[1]
# Presumably @property: cores used to drain NIC RX buffers.
1549 def all_rx_cores(self):
1550 return self.latency_cores
# Presumably @property (decorator missing, 1550→1553).
1553 def vpe_ports(self):
1554 if not self._ports_tuple:
1555 self._ports_tuple = self.get_ports_gen_vpe()
1556 return self._ports_tuple
# Presumably @property accessors over the cached port tuple.
1559 def cpe_ports(self):
1560 return self.vpe_ports[0]
1563 def inet_ports(self):
1564 return self.vpe_ports[1]
# Classify gen-mode cores by their "name" option ("cpe*" vs "inet*").
# NOTE(review): list initializations and `continue` lines are missing
# (numbering jumps 1566→1569, 1570→1573, 1573→1576, 1577→1580).
1566 def get_cores_gen_vpe(self):
1569 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1570 if not section_name.startswith("core"):
# Skip sections that are not generator cores.
1573 if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
1576 for item_key, item_value in section:
1577 if item_key != 'name':
1580 if item_value.startswith("cpe"):
1581 core_tuple = CoreSocketTuple(section_name)
1582 core_tag = core_tuple.core_id
1583 cpe_cores.append(core_tag)
1585 elif item_value.startswith("inet"):
1586 core_tuple = CoreSocketTuple(section_name)
1587 inet_core = core_tuple.core_id
1588 inet_cores.append(inet_core)
1590 return cpe_cores, inet_cores
# Classify "port N" config sections by their "name" option; the port number
# is the first integer in the section name.
# NOTE(review): list initializations and `continue` lines are missing
# (numbering jumps 1592→1596, 1597→1599, 1600→1602, 1603→1606).
1592 def get_ports_gen_vpe(self):
1596 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1597 if not section_name.startswith("port"):
1599 tx_port_iter = re.finditer(r'\d+', section_name)
1600 tx_port_no = int(next(tx_port_iter).group(0))
1602 for item_key, item_value in section:
1603 if item_key != 'name':
1606 if item_value.startswith("cpe"):
1607 cpe_ports.append(tx_port_no)
1609 elif item_value.startswith("inet"):
1610 inet_ports.append(tx_port_no)
1612 return cpe_ports, inet_ports
# Presumably @contextmanager (used via `with` in run_test); decorator line
# missing (1612→1615).
1615 def traffic_context(self, pkt_size, value):
1616 # Calculate the target upload and download speed. The upload and
1617 # download packets have different packet sizes, so in order to get
1618 # equal bandwidth usage, the ratio of the speeds has to match the ratio
1619 # of the packet sizes.
1620 cpe_pkt_size = pkt_size
# Inet side is 4 bytes smaller (no QinQ outer tag; MPLS label instead).
1621 inet_pkt_size = pkt_size - 4
1622 ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)
1624 curr_up_speed = curr_down_speed = 0
1625 max_up_speed = max_down_speed = value
# NOTE(review): the conditional guards around these two assignments
# (presumably `if ratio < 1:` / `elif ratio > 1:`) are missing
# (numbering jumps 1625→1627→1629).
1627 max_down_speed = value * ratio
1629 max_up_speed = value / ratio
1631 # Adjust speed when multiple cores per port are used to generate traffic
1632 if len(self.cpe_ports) != len(self.cpe_cores):
1633 max_down_speed *= 1.0 * len(self.cpe_ports) / len(self.cpe_cores)
1634 if len(self.inet_ports) != len(self.inet_cores):
1635 max_up_speed *= 1.0 * len(self.inet_ports) / len(self.inet_cores)
1641 # Flush any packets in the NIC RX buffers, otherwise the stats will be
1643 self.sut.start(self.all_rx_cores)
# NOTE(review): a sleep between start/stop is missing (1643→1645).
1645 self.sut.stop(self.all_rx_cores)
1647 self.sut.reset_stats()
1649 self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
1650 self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size)
1652 self.sut.reset_values(self.cpe_cores)
1653 self.sut.reset_values(self.inet_cores)
1655 # Set correct IP and UDP lengths in packet headers
1656 # CPE: IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4)
1657 self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2)
1658 # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4)
1659 self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2)
1661 # INET: IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4)
1662 self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2)
1663 # UDP length (byte 42): 42 for MAC(12), EthType(2), MPLS(4), IP(20), CRC(4)
1664 self.sut.set_value(self.inet_cores, 42, inet_pkt_size - 42, 2)
1666 self.sut.set_speed(self.inet_cores, curr_up_speed)
1667 self.sut.set_speed(self.cpe_cores, curr_down_speed)
1669 # Ramp up the transmission speed. First go to the common speed, then
1670 # increase steps for the faster one.
1671 self.sut.start(self.cpe_cores + self.inet_cores + self.all_rx_cores)
1673 LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)
1675 while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
1676 # The min(..., ...) takes care of 1) floating point rounding errors
1677 # that could make curr_*_speed to be slightly greater than
1678 # max_*_speed and 2) max_*_speed not being an exact multiple of
1680 if curr_up_speed < max_up_speed:
1681 curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
1682 if curr_down_speed < max_down_speed:
1683 curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)
1685 self.sut.set_speed(self.inet_cores, curr_up_speed)
1686 self.sut.set_speed(self.cpe_cores, curr_down_speed)
1687 time.sleep(self.step_time)
1689 LOG.info("Target speeds reached. Starting real test.")
# NOTE(review): the `yield` marking the measurement window is missing
# between the log above and the stop below (numbering jumps 1689→1693).
1693 self.sut.stop(self.cpe_cores + self.inet_cores)
1694 LOG.info("Test ended. Flushing NIC buffers")
1695 self.sut.start(self.all_rx_cores)
# NOTE(review): the flush sleep between start/stop is missing (1695→1697).
1697 self.sut.stop(self.all_rx_cores)
# Same shape as the base-class run_test: ramp (via traffic_context above),
# measure for `duration`, return (ProxTestDataTuple, per-port samples).
1699 def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
1700 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
1701 data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
1702 value, tolerated_loss, line_speed)
1704 with data_helper, self.traffic_context(pkt_size,
1705 self.pct_10gbps(value, line_speed)):
1706 with data_helper.measure_tot_stats():
1707 time.sleep(duration)
1708 # Getting statistics to calculate PPS at right speed....
1709 data_helper.capture_tsc_hz()
1710 data_helper.latency = self.get_latency()
1712 return data_helper.result_tuple, data_helper.samples
# Profile helper for the lwAFTR (lightweight 4over6 AFTR) generator test:
# gen cores/ports are classified as tun (IPv6-encapsulated, 40 bytes
# larger) vs inet (plain IPv4), speeds balanced for equal wire bandwidth.
# NOTE(review): embedded line numbers jump in this listing; @property and
# @staticmethod decorators, `continue` lines, ratio guards and the
# context-manager yield are among the missing lines — hedged notes below.
1715 class ProxlwAFTRProfileHelper(ProxProfileHelper):
1717 __prox_profile_type__ = "lwAFTR gen"
1719 def __init__(self, resource_helper):
1720 super(ProxlwAFTRProfileHelper, self).__init__(resource_helper)
# Lazy caches for (tun_cores, inet_cores) and (tun_ports, inet_ports).
1721 self._cores_tuple = None
1722 self._ports_tuple = None
# Ramp step interval in seconds (see the while loop in traffic_context).
1724 self.step_time = 0.5
# Presumably @property (decorator missing, 1724→1727).
1727 def _lwaftr_cores(self):
1728 if not self._cores_tuple:
1729 self._cores_tuple = self._get_cores_gen_lwaftr()
1730 return self._cores_tuple
# Presumably @property accessors over the cached core tuple.
1733 def tun_cores(self):
1734 return self._lwaftr_cores[0]
1737 def inet_cores(self):
1738 return self._lwaftr_cores[1]
# Presumably @property (decorator missing, 1738→1741).
1741 def _lwaftr_ports(self):
1742 if not self._ports_tuple:
1743 self._ports_tuple = self._get_ports_gen_lw_aftr()
1744 return self._ports_tuple
# Presumably @property accessors over the cached port tuple.
1747 def tun_ports(self):
1748 return self._lwaftr_ports[0]
1751 def inet_ports(self):
1752 return self._lwaftr_ports[1]
# Presumably @property: cores used to drain NIC RX buffers.
1755 def all_rx_cores(self):
1756 return self.latency_cores
# Classify gen-mode cores by their "name" option ('tun*' vs 'inet*').
# NOTE(review): list initializations and `continue` lines are missing
# (numbering jumps 1758→1761, 1762→1765, 1765→1768).
1758 def _get_cores_gen_lwaftr(self):
1761 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1762 if not section_name.startswith("core"):
# Skip sections that are not generator cores.
1765 if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
1768 core_tuple = CoreSocketTuple(section_name)
1769 core_tag = core_tuple.core_id
1770 for item_value in (v for k, v in section if k == 'name'):
1771 if item_value.startswith('tun'):
1772 tun_cores.append(core_tag)
1773 elif item_value.startswith('inet'):
1774 inet_cores.append(core_tag)
1776 return tun_cores, inet_cores
# Classify "port N" config sections ('lwB4*' -> tun, 'inet*' -> inet).
# NOTE(review): list initializations and the `continue` for non-matching
# sections are missing (numbering jumps 1778→1782, 1784→1788).
1778 def _get_ports_gen_lw_aftr(self):
1782 re_port = re.compile(r'port (\d+)')
1783 for section_name, section in self.resource_helper.setup_helper.prox_config_data:
1784 match = re_port.search(section_name)
1788 tx_port_no = int(match.group(1))
1789 for item_value in (v for k, v in section if k == 'name'):
1790 if item_value.startswith('lwB4'):
1791 tun_ports.append(tx_port_no)
1792 elif item_value.startswith('inet'):
1793 inet_ports.append(tx_port_no)
1795 return tun_ports, inet_ports
# Helper ratio len1/len2 as float; takes no self — presumably
# @staticmethod (decorator missing, 1795→1798; guard lines 1799-1800 also
# missing, presumably returning 1.0 when the lengths match).
1798 def _resize(len1, len2):
1801 return 1.0 * len1 / len2
# Presumably @contextmanager (used via `with` in run_test); decorator line
# missing (1801→1804).
1804 def traffic_context(self, pkt_size, value):
1805 # Tester is sending packets at the required speed already after
1806 # setup_test(). Just get the current statistics, sleep the required
1807 # amount of time and calculate packet loss.
1808 tun_pkt_size = pkt_size
# Inet side is 40 bytes smaller: no IPv6 encapsulation header.
1809 inet_pkt_size = pkt_size - 40
1810 ratio = 1.0 * (tun_pkt_size + 20) / (inet_pkt_size + 20)
1812 curr_up_speed = curr_down_speed = 0
1813 max_up_speed = max_down_speed = value
# NOTE(review): the conditional guard around this assignment (presumably
# `if ratio != 1:` or similar) is missing (numbering jumps 1813→1815).
1815 max_up_speed = value / ratio
1817 # Adjust speed when multiple cores per port are used to generate traffic
1818 if len(self.tun_ports) != len(self.tun_cores):
1819 max_down_speed *= self._resize(len(self.tun_ports), len(self.tun_cores))
1820 if len(self.inet_ports) != len(self.inet_cores):
1821 max_up_speed *= self._resize(len(self.inet_ports), len(self.inet_cores))
1827 # Flush any packets in the NIC RX buffers, otherwise the stats will be
1829 self.sut.start(self.all_rx_cores)
# NOTE(review): a sleep between start/stop is missing (1829→1831).
1831 self.sut.stop(self.all_rx_cores)
1833 self.sut.reset_stats()
1835 self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
1836 self.sut.set_pkt_size(self.tun_cores, tun_pkt_size)
1838 self.sut.reset_values(self.tun_cores)
1839 self.sut.reset_values(self.inet_cores)
1841 # Set correct IP and UDP lengths in packet headers
1843 # IPv6 length (byte 18): 58 for MAC(12), EthType(2), IPv6(40) , CRC(4)
1844 self.sut.set_value(self.tun_cores, 18, tun_pkt_size - 58, 2)
1845 # Inner IPv4 total length (byte 56): 58 for MAC(12), EthType(2), IPv6(40), CRC(4)
1846 self.sut.set_value(self.tun_cores, 56, tun_pkt_size - 58, 2)
1847 # UDP length (byte 78): 78 for MAC(12), EthType(2), IPv6(40), IP(20), CRC(4)
1848 self.sut.set_value(self.tun_cores, 78, tun_pkt_size - 78, 2)
1851 # IP length (byte 16): 18 for MAC(12), EthType(2), CRC(4)
1852 self.sut.set_value(self.inet_cores, 16, inet_pkt_size - 18, 2)
1853 # UDP length (byte 38): 38 for MAC(12), EthType(2), IP(20), CRC(4)
1854 self.sut.set_value(self.inet_cores, 38, inet_pkt_size - 38, 2)
1856 LOG.info("Initializing SUT: sending lwAFTR packets")
1857 self.sut.set_speed(self.inet_cores, curr_up_speed)
1858 self.sut.set_speed(self.tun_cores, curr_down_speed)
1861 # Ramp up the transmission speed. First go to the common speed, then
1862 # increase steps for the faster one.
1863 self.sut.start(self.tun_cores + self.inet_cores + self.latency_cores)
1865 LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)
1867 while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
1868 # The min(..., ...) takes care of 1) floating point rounding errors
1869 # that could make curr_*_speed to be slightly greater than
1870 # max_*_speed and 2) max_*_speed not being an exact multiple of
1872 if curr_up_speed < max_up_speed:
1873 curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
1874 if curr_down_speed < max_down_speed:
1875 curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)
1877 self.sut.set_speed(self.inet_cores, curr_up_speed)
1878 self.sut.set_speed(self.tun_cores, curr_down_speed)
1879 time.sleep(self.step_time)
1881 LOG.info("Target speeds reached. Starting real test.")
# NOTE(review): the `yield` marking the measurement window is missing
# between the log above and the stop below (numbering jumps 1881→1885).
1885 self.sut.stop(self.tun_cores + self.inet_cores)
1886 LOG.info("Test ended. Flushing NIC buffers")
1887 self.sut.start(self.all_rx_cores)
# NOTE(review): the flush sleep between start/stop is missing (1887→1889).
1889 self.sut.stop(self.all_rx_cores)
# Same shape as the base-class run_test: ramp (via traffic_context above),
# measure for `duration`, return (ProxTestDataTuple, per-port samples).
1891 def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
1892 line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
1893 data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
1894 value, tolerated_loss, line_speed)
1896 with data_helper, self.traffic_context(pkt_size,
1897 self.pct_10gbps(value, line_speed)):
1898 with data_helper.measure_tot_stats():
1899 time.sleep(duration)
1900 # Getting statistics to calculate PPS at right speed....
1901 data_helper.capture_tsc_hz()
1902 data_helper.latency = self.get_latency()
1904 return data_helper.result_tuple, data_helper.samples