import math
import os
+import sys
import random
import time
import traceback
+from functools import reduce
from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
+from nfvbench.specs import ChainType
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError
+from hdrh.histogram import HdrHistogram
+
# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
+from trex.stl.api import ARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
+from trex.stl.api import STLTXMultiBurst
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFixIpv4
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
+from trex.stl.api import STLVmTupleGen
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
self.rates = []
self.capture_id = None
self.packet_list = []
+ self.l2_frame_size = 0
def get_version(self):
"""Get the Trex version."""
pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
- def extract_stats(self, in_stats):
+ def extract_stats(self, in_stats, ifstats):
"""Extract stats from dict returned by Trex API.
:param in_stats: dict as returned by TRex api
self.__combine_latencies(in_stats, result[ph]['rx'], ph)
total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
+
+ # in case of GARP packets we need to base total_tx_pkts value using flow_stats
+        # as GARP packets have no flow stats and will not be received on the other port
+ if self.config.periodic_gratuitous_arp:
+ if not self.config.no_flow_stats and not self.config.no_latency_stats:
+ global_total_tx_pkts = total_tx_pkts
+ total_tx_pkts = 0
+ if ifstats:
+ for chain_id, _ in enumerate(ifstats):
+ for ph in self.port_handle:
+ pg_id, lat_pg_id = self.get_pg_id(ph, chain_id)
+ flows_tx_pkts = in_stats['flow_stats'][pg_id]['tx_pkts']['total'] + \
+ in_stats['flow_stats'][lat_pg_id]['tx_pkts']['total']
+ result[ph]['tx']['total_pkts'] = flows_tx_pkts
+ total_tx_pkts += flows_tx_pkts
+ else:
+ for pg_id in in_stats['flow_stats']:
+ if pg_id != 'global':
+ total_tx_pkts += in_stats['flow_stats'][pg_id]['tx_pkts']['total']
+ result["garp_total_tx_rate"] = cast_integer(
+ (global_total_tx_pkts - total_tx_pkts) / self.config.duration_sec)
+ else:
+ LOG.warning("Gratuitous ARP are not received by the other port so TRex and NFVbench"
+ " see these packets as dropped. Please do not activate no_flow_stats"
+ " and no_latency_stats properties to have a better drop rate.")
+
result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
+ # actual offered tx rate in bps
+ avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
+ total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
+ result['offered_tx_rate_bps'] = total_tx_bps
+
+ result.update(self.get_theoretical_rates(avg_packet_size))
+
result["flow_stats"] = in_stats["flow_stats"]
result["latency"] = in_stats["latency"]
+
+ # Merge HDRHistogram to have an overall value for all chains and ports
+ # (provided that the histogram exists in the stats returned by T-Rex)
+ # Of course, empty histograms will produce an empty (invalid) histogram.
+ try:
+ hdrh_list = []
+ if ifstats:
+ for chain_id, _ in enumerate(ifstats):
+ for ph in self.port_handle:
+ _, lat_pg_id = self.get_pg_id(ph, chain_id)
+ hdrh_list.append(
+ HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
+ else:
+ for pg_id in in_stats['latency']:
+ if pg_id != 'global':
+ hdrh_list.append(
+ HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
+
+ def add_hdrh(x, y):
+ x.add(y)
+ return x
+ decoded_hdrh = reduce(add_hdrh, hdrh_list)
+ result["overall_hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
+ except KeyError:
+ pass
+
return result
def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
bind_layers(UDP, VXLAN, dport=4789)
bind_layers(VXLAN, Ether)
- def _create_pkt(self, stream_cfg, l2frame_size):
+ def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
"""Create a packet of given size.
l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
udp_args = {}
if stream_cfg['udp_src_port']:
udp_args['sport'] = int(stream_cfg['udp_src_port'])
- udp_args['sport_step'] = int(stream_cfg['udp_port_step'])
+ if stream_cfg['udp_port_step'] == 'random':
+ step = 1
+ else:
+ step = stream_cfg['udp_port_step']
+ udp_args['sport_step'] = int(step)
udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
if stream_cfg['udp_dst_port']:
udp_args['dport'] = int(stream_cfg['udp_dst_port'])
- udp_args['dport_step'] = int(stream_cfg['udp_port_step'])
+ if stream_cfg['udp_port_step'] == 'random':
+ step = 1
+ else:
+ step = stream_cfg['udp_port_step']
+ udp_args['dport_step'] = int(step)
udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
UDP(dport=udp_args['dport'], sport=udp_args['sport'])
- if stream_cfg['ip_src_static'] is True:
- src_max_ip_value = stream_cfg['ip_src_addr']
- else:
- src_max_ip_value = stream_cfg['ip_src_addr_max']
- if stream_cfg['ip_addrs_step'] == 'random':
- src_fv_ip = STLVmFlowVarRepeatableRandom(
- name="ip_src",
- min_value=stream_cfg['ip_src_addr'],
- max_value=src_max_ip_value,
- size=4,
- seed=random.randint(0, 32767),
- limit=stream_cfg['ip_src_count'])
- dst_fv_ip = STLVmFlowVarRepeatableRandom(
- name="ip_dst",
- min_value=stream_cfg['ip_dst_addr'],
- max_value=stream_cfg['ip_dst_addr_max'],
- size=4,
- seed=random.randint(0, 32767),
- limit=stream_cfg['ip_dst_count'])
- else:
- src_fv_ip = STLVmFlowVar(
- name="ip_src",
- min_value=stream_cfg['ip_src_addr'],
- max_value=src_max_ip_value,
- size=4,
- op="inc",
- step=stream_cfg['ip_addrs_step'])
- dst_fv_ip = STLVmFlowVar(
- name="ip_dst",
- min_value=stream_cfg['ip_dst_addr'],
- max_value=stream_cfg['ip_dst_addr_max'],
- size=4,
- op="inc",
- step=stream_cfg['ip_addrs_step'])
-
- if stream_cfg['udp_port_step'] == 'random':
- src_fv_port = STLVmFlowVarRepeatableRandom(
- name="p_src",
- min_value=udp_args['sport'],
- max_value=udp_args['sport_max'],
- size=2,
- seed=random.randint(0, 32767),
- limit=udp_args['udp_src_count'])
- dst_fv_port = STLVmFlowVarRepeatableRandom(
- name="p_dst",
- min_value=udp_args['dport'],
- max_value=udp_args['dport_max'],
- size=2,
- seed=random.randint(0, 32767),
- limit=stream_cfg['udp_dst_count'])
+
+        # STLVmTupleGen needs flow count >= cores used by TRex; if FC < cores we use STLVmFlowVar
+ if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
+ stream_cfg['count'] >= self.generator_config.cores:
+ src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
+ ip_max=stream_cfg['ip_src_addr_max'],
+ port_min=udp_args['sport'],
+ port_max=udp_args['sport_max'],
+ name="tuple_src",
+ limit_flows=stream_cfg['count'])
+ dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
+ ip_max=stream_cfg['ip_dst_addr_max'],
+ port_min=udp_args['dport'],
+ port_max=udp_args['dport_max'],
+ name="tuple_dst",
+ limit_flows=stream_cfg['count'])
+ vm_param = [
+ src_fv,
+ STLVmWrFlowVar(fv_name="tuple_src.ip",
+ pkt_offset="IP:{}.src".format(encap_level)),
+ STLVmWrFlowVar(fv_name="tuple_src.port",
+ pkt_offset="UDP:{}.sport".format(encap_level)),
+ dst_fv,
+ STLVmWrFlowVar(fv_name="tuple_dst.ip",
+ pkt_offset="IP:{}.dst".format(encap_level)),
+ STLVmWrFlowVar(fv_name="tuple_dst.port",
+ pkt_offset="UDP:{}.dport".format(encap_level)),
+ ]
else:
- src_fv_port = STLVmFlowVar(
- name="p_src",
- min_value=udp_args['sport'],
- max_value=udp_args['sport_max'],
- size=2,
- op="inc",
- step=udp_args['sport_step'])
- dst_fv_port = STLVmFlowVar(
- name="p_dst",
- min_value=udp_args['dport'],
- max_value=udp_args['dport_max'],
- size=2,
- op="inc",
- step=udp_args['dport_step'])
- vm_param = [
- src_fv_ip,
- STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
- src_fv_port,
- STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
- dst_fv_ip,
- STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
- dst_fv_port,
- STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
- ]
+ if disable_random_latency_flow:
+ src_fv_ip = STLVmFlowVar(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr'],
+ size=4)
+ dst_fv_ip = STLVmFlowVar(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr'],
+ size=4)
+ elif stream_cfg['ip_addrs_step'] == 'random':
+ src_fv_ip = STLVmFlowVarRepeatableRandom(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr_max'],
+ size=4,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['ip_src_count'])
+ dst_fv_ip = STLVmFlowVarRepeatableRandom(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr_max'],
+ size=4,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['ip_dst_count'])
+ else:
+ src_fv_ip = STLVmFlowVar(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr_max'],
+ size=4,
+ op="inc",
+ step=stream_cfg['ip_addrs_step'])
+ dst_fv_ip = STLVmFlowVar(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr_max'],
+ size=4,
+ op="inc",
+ step=stream_cfg['ip_addrs_step'])
+
+ if disable_random_latency_flow:
+ src_fv_port = STLVmFlowVar(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport'],
+ size=2)
+ dst_fv_port = STLVmFlowVar(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport'],
+ size=2)
+ elif stream_cfg['udp_port_step'] == 'random':
+ src_fv_port = STLVmFlowVarRepeatableRandom(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport_max'],
+ size=2,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['udp_src_count'])
+ dst_fv_port = STLVmFlowVarRepeatableRandom(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport_max'],
+ size=2,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['udp_dst_count'])
+ else:
+ src_fv_port = STLVmFlowVar(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport_max'],
+ size=2,
+ op="inc",
+ step=udp_args['sport_step'])
+ dst_fv_port = STLVmFlowVar(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport_max'],
+ size=2,
+ op="inc",
+ step=udp_args['dport_step'])
+ vm_param = [
+ src_fv_ip,
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
+ src_fv_port,
+ STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
+ dst_fv_ip,
+ STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
+ dst_fv_port,
+ STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
+ ]
        # Use HW Offload to calculate the outer IP/UDP packet
vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
l4_offset="UDP:0",
return STLPktBuilder(pkt=pkt_base / pad,
vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
+ def _create_gratuitous_arp_pkt(self, stream_cfg):
+ """Create a GARP packet.
+
+ """
+ pkt_base = Ether(src=stream_cfg['mac_src'], dst="ff:ff:ff:ff:ff:ff")
+
+ if self.config.vxlan or self.config.mpls:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
+ elif stream_cfg['vlan_tag'] is not None:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
+
+ pkt_base /= ARP(psrc=stream_cfg['ip_src_tg_gw'], hwsrc=stream_cfg['mac_src'],
+ hwdst=stream_cfg['mac_src'], pdst=stream_cfg['ip_src_tg_gw'])
+
+ return STLPktBuilder(pkt=pkt_base)
+
def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
e2e=False):
"""Create a list of streams corresponding to a given chain and stream config.
"""
streams = []
pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
- if self.config.no_flow_stats:
- LOG.info("Traffic flow statistics are disabled.")
if l2frame == 'IMIX':
for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
pkt = self._create_pkt(stream_cfg, l2_frame_size)
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowStats(pg_id=pg_id,
vxlan=True)
- if not self.config.no_flow_stats else None,
+ if not self.config.no_flow_stats else None,
mode=STLTXCont(pps=ratio)))
else:
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowStats(pg_id=pg_id)
- if not self.config.no_flow_stats else None,
+ if not self.config.no_flow_stats else None,
mode=STLTXCont(pps=ratio)))
if latency:
# for IMIX, the latency packets have the average IMIX packet size
- pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
+ if stream_cfg['ip_addrs_step'] == 'random' or \
+ stream_cfg['udp_port_step'] == 'random':
+ # Force latency flow to only one flow to avoid creating flows
+ # over requested flow count
+ pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
+ else:
+ pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
else:
l2frame_size = int(l2frame)
pkt = self._create_pkt(stream_cfg, l2frame_size)
+ if self.config.periodic_gratuitous_arp:
+ requested_pps = int(utils.parse_rate_str(self.rates[0])[
+ 'rate_pps']) - self.config.gratuitous_arp_pps
+ if latency:
+ requested_pps -= self.LATENCY_PPS
+ stltx_cont = STLTXCont(pps=requested_pps)
+ else:
+ stltx_cont = STLTXCont()
if e2e or stream_cfg['mpls']:
streams.append(STLStream(packet=pkt,
# Flow stats is disabled for MPLS now
# flow_stats=STLFlowStats(pg_id=pg_id),
- mode=STLTXCont()))
+ mode=stltx_cont))
else:
if stream_cfg['vxlan'] is True:
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowStats(pg_id=pg_id,
vxlan=True)
- if not self.config.no_flow_stats else None,
- mode=STLTXCont()))
+ if not self.config.no_flow_stats else None,
+ mode=stltx_cont))
else:
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowStats(pg_id=pg_id)
- if not self.config.no_flow_stats else None,
- mode=STLTXCont()))
+ if not self.config.no_flow_stats else None,
+ mode=stltx_cont))
# for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
# without vlan, the min l2 frame size is 64
# with vlan it is 68
# This only applies to the latency stream
- if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
- pkt = self._create_pkt(stream_cfg, 68)
+ if latency:
+ if stream_cfg['vlan_tag'] and l2frame_size < 68:
+ l2frame_size = 68
+ if stream_cfg['ip_addrs_step'] == 'random' or \
+ stream_cfg['udp_port_step'] == 'random':
+ # Force latency flow to only one flow to avoid creating flows
+ # over requested flow count
+ pkt = self._create_pkt(stream_cfg, l2frame_size, True)
+ else:
+ pkt = self._create_pkt(stream_cfg, l2frame_size)
if latency:
if self.config.no_latency_stats:
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
vxlan=True)
- if not self.config.no_latency_stats else None,
+ if not self.config.no_latency_stats else None,
mode=STLTXCont(pps=self.LATENCY_PPS)))
else:
streams.append(STLStream(packet=pkt,
flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
- if not self.config.no_latency_stats else None,
+ if not self.config.no_latency_stats else None,
mode=STLTXCont(pps=self.LATENCY_PPS)))
+
+ if self.config.periodic_gratuitous_arp and (
+ self.config.l3_router or self.config.service_chain == ChainType.EXT):
+ # In case of L3 router feature or EXT chain with router
+ # and depending on ARP stale time SUT configuration
+ # Gratuitous ARP from TG port to the router is needed to keep traffic up
+ garp_pkt = self._create_gratuitous_arp_pkt(stream_cfg)
+ ibg = self.config.gratuitous_arp_pps * 1000000.0
+ packets_count = int(self.config.duration_sec / self.config.gratuitous_arp_pps)
+ streams.append(
+ STLStream(packet=garp_pkt,
+ mode=STLTXMultiBurst(pkts_per_burst=1, count=packets_count, ibg=ibg)))
return streams
    @timeout(5)
    def __connect(self, client):
        """Connect the given TRex client (call bounded by the @timeout(5) decorator)."""
        client.connect()
    def __local_server_status(self):
        """ The TRex server may have started but failed initializing... and stopped.
        This piece of code is especially designed to address
        the case when a fatal failure occurs on a DPDK init call.
        The TRex algorithm should be revised to include some missing timeouts (?)
        status returned:
            0: no error detected
            1: fatal error detected - should lead to exiting the run
            2: error detected that could be solved by starting again
        The diagnostic is based on parsing the local trex log file (improvable)
        """
        status = 0
        # Lines of interest captured while scanning the log file:
        message = None  # latest line containing 'msg'
        failure = None  # latest line containing 'fail'
        exited = None  # latest line containing 'exit'
        cause = None  # latest line containing 'cause'
        error = None  # first line starting with 'Error:'/'ERROR'
        before = None  # line immediately preceding the error line
        after = None  # last non-empty line seen after the error line
        last = None  # previously scanned line (candidate for 'before')
        try:
            with open('/tmp/trex.log', 'r', encoding="utf-8") as trex_log:
                for _line in trex_log:
                    line = _line.strip()
                    # The usage banner marks the end of the useful diagnostics
                    if line.startswith('Usage:'):
                        break
                    # Skip benign progress messages
                    if 'ports are bound' in line:
                        continue
                    if 'please wait' in line:
                        continue
                    # NOTE: the branch order matters — once 'error' is set,
                    # subsequent unmatched non-empty lines land in 'after'
                    if 'exit' in line.lower():
                        exited = line
                    elif 'cause' in line.lower():
                        cause = line
                    elif 'fail' in line.lower():
                        failure = line
                    elif 'msg' in line.lower():
                        message = line
                    elif (error is not None) and line:
                        after = line
                    elif line.startswith('Error:') or line.startswith('ERROR'):
                        error = line
                        before = last
                    last = line
        except FileNotFoundError:
            # No local TRex log file: nothing to diagnose
            pass
        if exited is not None:
            # TRex exited during initialization: fatal by default, unless the
            # accompanying message suggests a simple restart could help
            status = 1
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
            if cause is not None:
                LOG.info("TRex [cont'd] %s", cause)
            if failure is not None:
                LOG.info("TRex [cont'd] %s", failure)
            if message is not None:
                LOG.info("TRex [cont'd] %s", message)
                if 'not supported yet' in message.lower():
                    LOG.info("TRex [cont'd] Try starting again!")
                    status = 2
        elif error is not None:
            # An error was logged without an explicit exit: treat as fatal
            status = 1
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
            if after is not None:
                LOG.info("TRex [cont'd] %s", after)
            elif before is not None:
                LOG.info("TRex [cont'd] %s", before)
        return status
+
def __connect_after_start(self):
# after start, Trex may take a bit of time to initialize
# so we need to retry a few times
+ # we try to capture recoverable error cases (checking status)
+ status = 0
for it in range(self.config.generic_retry_count):
try:
time.sleep(1)
except Exception as ex:
if it == (self.config.generic_retry_count - 1):
raise
+ status = self.__local_server_status()
+ if status > 0:
+ # No need to wait anymore, something went wrong and TRex exited
+ if status == 1:
+ LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
+ print("More information? Try the command: "
+ + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
+ sys.exit(0)
+ if status == 2:
+ # a new start will follow
+ return status
LOG.info("Retrying connection to TRex (%s)...", ex.msg)
+ return status
def connect(self):
"""Connect to the TRex server."""
+ status = 0
server_ip = self.generator_config.ip
LOG.info("Connecting to TRex (%s)...", server_ip)
if server_ip == '127.0.0.1':
config_updated = self.__check_config()
if config_updated or self.config.restart:
- self.__restart()
+ status = self.__restart()
except (TimeoutError, STLError) as e:
if server_ip == '127.0.0.1':
- self.__start_local_server()
+ status = self.__start_local_server()
else:
- raise TrafficGeneratorException(e.message)
+ raise TrafficGeneratorException(e.message) from e
+
+ if status == 2:
+ # Workaround in case of a failed TRex server initialization
+ # we try to start it again (twice maximum)
+ # which may allow low level initialization to complete.
+ if self.__start_local_server() == 2:
+ self.__start_local_server()
ports = list(self.generator_config.ports)
self.port_handle = ports
try:
LOG.info("Starting TRex ...")
self.__start_server()
- self.__connect_after_start()
+ status = self.__connect_after_start()
except (TimeoutError, STLError) as e:
LOG.error('Cannot connect to TRex')
LOG.error(traceback.format_exc())
break
last_size = size
time.sleep(1)
- with open(logpath, 'r') as f:
+ with open(logpath, 'r', encoding="utf-8") as f:
message = f.read()
else:
message = e.message
- raise TrafficGeneratorException(message)
+ raise TrafficGeneratorException(message) from e
+ return status
def __start_server(self):
server = TRexTrafficServer()
if not self.client.is_connected():
LOG.info("TRex is stopped...")
break
- self.__start_local_server()
+ # Start and report a possible failure
+ return self.__start_local_server()
def __stop_server(self):
if self.generator_config.ip == '127.0.0.1':
chain_count)
break
- # if the capture from the TRex console was started before the arp request step,
- # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
+ # A traffic capture may have been started (from a T-Rex console) at this time.
+ # If asked so, we keep the service mode enabled here, and disable it otherwise.
+ # | Disabling the service mode while a capture is in progress
+ # | would cause the application to stop/crash with an error.
if not self.config.service_mode:
self.client.set_service_mode(ports=self.port_handle, enabled=False)
if len(arp_dest_macs) == len(self.port_handle):
total_rate += int(r['rate_pps'])
else:
mult = 1
- total_rate = utils.convert_rates(l2frame_size, rates[0], intf_speed)
+ r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
+ total_rate = int(r['rate_pps'])
# rate must be enough for latency stream and at least 1 pps for base stream per chain
- required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
+ if self.config.periodic_gratuitous_arp:
+ required_rate = (self.LATENCY_PPS + 1 + self.config.gratuitous_arp_pps) \
+ * self.config.service_chain_count * mult
+ else:
+ required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
result = utils.convert_rates(l2frame_size,
{'rate_pps': required_rate},
intf_speed * mult)
latency: True if latency measurement is needed
e2e: True if performing "end to end" connectivity check
"""
+ if self.config.no_flow_stats:
+ LOG.info("Traffic flow statistics are disabled.")
r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
if not r['result']:
raise TrafficGeneratorException(
.format(pps=r['rate_pps'],
bps=r['rate_bps'],
load=r['rate_percent']))
+ self.l2_frame_size = l2frame_size
# a dict of list of streams indexed by port#
# in case of fixed size, has self.chain_count * 2 * 2 streams
# (1 normal + 1 latency stream per direction per chain)
for port in self.port_handle:
streamblock[port] = []
stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
+ if self.generator_config.ip_addrs_step == 'random' \
+ or self.generator_config.gen_config.udp_port_step == 'random':
+ LOG.warning("Using random step, the number of flows can be less than "
+ "the requested number of flows due to repeatable multivariate random "
+ "generation which can reproduce the same pattern of values")
self.rates = [utils.to_rate_str(rate) for rate in rates]
for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
streamblock[0].extend(self.generate_streams(self.port_handle[0],
self.client.reset(self.port_handle)
LOG.info('Cleared all existing streams')
- def get_stats(self):
+ def get_stats(self, ifstats=None):
"""Get stats from Trex."""
stats = self.client.get_stats()
- return self.extract_stats(stats)
+ return self.extract_stats(stats, ifstats)
def get_macs(self):
"""Return the Trex local port MAC addresses.
if self.capture_id:
self.client.stop_capture(capture_id=self.capture_id['id'])
self.capture_id = None
- # if the capture from TRex console was started before the connectivity step,
- # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
+ # A traffic capture may have been started (from a T-Rex console) at this time.
+ # If asked so, we keep the service mode enabled here, and disable it otherwise.
+ # | Disabling the service mode while a capture is in progress
+ # | would cause the application to stop/crash with an error.
if not self.config.service_mode:
self.client.set_service_mode(ports=self.port_handle, enabled=False)
pass
def set_service_mode(self, enabled=True):
- """Enable/disable the 'service_mode'."""
+ """Enable/disable the 'service' mode."""
self.client.set_service_mode(ports=self.port_handle, enabled=enabled)