1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 """Driver module for TRex traffic generator."""
22 from functools import reduce
24 from itertools import count
25 # pylint: disable=import-error
26 from scapy.contrib.mpls import MPLS # flake8: noqa
27 # pylint: enable=import-error
28 from nfvbench.log import LOG
29 from nfvbench.specs import ChainType
30 from nfvbench.traffic_server import TRexTrafficServer
31 from nfvbench.utils import cast_integer
32 from nfvbench.utils import timeout
33 from nfvbench.utils import TimeoutError
35 from hdrh.histogram import HdrHistogram
37 # pylint: disable=import-error
38 from trex.common.services.trex_service_arp import ServiceARP
39 from trex.stl.api import ARP
40 from trex.stl.api import bind_layers
41 from trex.stl.api import CTRexVmInsFixHwCs
42 from trex.stl.api import Dot1Q
43 from trex.stl.api import Ether
44 from trex.stl.api import FlagsField
45 from trex.stl.api import IP
46 from trex.stl.api import Packet
47 from trex.stl.api import STLClient
48 from trex.stl.api import STLError
49 from trex.stl.api import STLFlowLatencyStats
50 from trex.stl.api import STLFlowStats
51 from trex.stl.api import STLPktBuilder
52 from trex.stl.api import STLScVmRaw
53 from trex.stl.api import STLStream
54 from trex.stl.api import STLTXCont
55 from trex.stl.api import STLTXMultiBurst
56 from trex.stl.api import STLVmFixChecksumHw
57 from trex.stl.api import STLVmFixIpv4
58 from trex.stl.api import STLVmFlowVar
59 from trex.stl.api import STLVmFlowVarRepeatableRandom
60 from trex.stl.api import STLVmTupleGen
61 from trex.stl.api import STLVmWrFlowVar
62 from trex.stl.api import ThreeBytesField
63 from trex.stl.api import UDP
64 from trex.stl.api import XByteField
66 # pylint: enable=import-error
68 from .traffic_base import AbstractTrafficGenerator
69 from .traffic_base import TrafficGeneratorException
70 from . import traffic_utils as utils
71 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
72 from .traffic_utils import IMIX_L2_SIZES
73 from .traffic_utils import IMIX_RATIOS
78 _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
80 fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
81 ThreeBytesField("vni", 0),
82 XByteField("reserved", 0x00)]
86 return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    # Bit layout used by get_pg_id() to build TRex packet group IDs:
    CHAIN_PG_ID_MASK = 0x007F    # chain ID field (7 bits -> up to 128 chains)
    PORT_PG_ID_MASK = 0x0080     # port bit (port 0 or 1)
    LATENCY_PG_ID_MASK = 0x0100  # set on the latency-stream variant of a pg_id
    # NOTE(review): a LATENCY_PPS constant (read elsewhere as self.LATENCY_PPS)
    # is not visible in this listing — likely dropped by the extraction.
    def __init__(self, traffic_client):
        """Initialize the TRex driver with the owning traffic client.

        traffic_client: provides self.generator_config / self.config via the base class
        """
        # NOTE(review): several attribute initializations appear to be missing
        # from this listing (e.g. self.client, self.id, self.rates, self.port_info)
        # — other methods read them.
        AbstractTrafficGenerator.__init__(self, traffic_client)
        # TRex port handles acquired at connect time (normally [0, 1])
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        # id of an in-progress packet capture, None when no capture is active
        self.capture_id = None
        # packets retrieved by a capture fetch
        self.packet_list = []
        # L2 frame size of the last programmed traffic (used for rate conversions)
        self.l2_frame_size = 0
108 def get_version(self):
109 """Get the Trex version."""
110 return self.client.get_server_version() if self.client else ''
112 def get_pg_id(self, port, chain_id):
113 """Calculate the packet group IDs to use for a given port/stream type/chain_id.
116 chain_id: identifies to which chain the pg_id is associated (0 to 255)
117 return: pg_id, lat_pg_id
119 We use a bit mask to set up the 3 fields:
120 0x007F: chain ID (8 bits for a max of 128 chains)
124 pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
125 return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
    def extract_stats(self, in_stats, ifstats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        :param ifstats: per-chain interface stats (iterated with enumerate below)
        """
        # NOTE(review): this listing is missing several original lines of this
        # method (the 'result' dict initialization, the per-port 'stats'
        # binding and the tx/rx sub-dict openers, and the hdrh list/try-except
        # scaffolding). Visible statements are kept unchanged.
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            far_end_stats = in_stats[1 - ph]
                # tx counters for this port
                'total_pkts': cast_integer(stats['opackets']),
                'total_pkt_bytes': cast_integer(stats['obytes']),
                'pkt_rate': cast_integer(stats['tx_pps']),
                'pkt_bit_rate': cast_integer(stats['tx_bps'])
                # rx counters for this port
                'total_pkts': cast_integer(stats['ipackets']),
                'total_pkt_bytes': cast_integer(stats['ibytes']),
                'pkt_rate': cast_integer(stats['rx_pps']),
                'pkt_bit_rate': cast_integer(stats['rx_bps']),
                # how many pkts were dropped in RX direction
                # need to take the tx counter on the far end port
                'dropped_pkts': cast_integer(
                    far_end_stats['opackets'] - stats['ipackets'])
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']

        # in case of GARP packets we need to base total_tx_pkts value using flow_stats
        # as GARP packets have no flow stats and will not be received on the other port
        if self.config.periodic_gratuitous_arp:
            if not self.config.no_flow_stats and not self.config.no_latency_stats:
                global_total_tx_pkts = total_tx_pkts
                # recompute tx totals from per-pg_id flow stats (GARP excluded)
                for chain_id, _ in enumerate(ifstats):
                    for ph in self.port_handle:
                        pg_id, lat_pg_id = self.get_pg_id(ph, chain_id)
                        flows_tx_pkts = in_stats['flow_stats'][pg_id]['tx_pkts']['total'] + \
                            in_stats['flow_stats'][lat_pg_id]['tx_pkts']['total']
                        result[ph]['tx']['total_pkts'] = flows_tx_pkts
                        total_tx_pkts += flows_tx_pkts
                for pg_id in in_stats['flow_stats']:
                    if pg_id != 'global':
                        total_tx_pkts += in_stats['flow_stats'][pg_id]['tx_pkts']['total']
                # GARP rate = packets sent beyond those counted by flow stats
                result["garp_total_tx_rate"] = cast_integer(
                    (global_total_tx_pkts - total_tx_pkts) / self.config.duration_sec)
            # NOTE(review): an 'else:' branch keyword appears to be missing from
            # this listing before the warning below.
                LOG.warning("Gratuitous ARP are not received by the other port so TRex and NFVbench"
                            " see these packets as dropped. Please do not activate no_flow_stats"
                            " and no_latency_stats properties to have a better drop rate.")

        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        # actual offered tx rate in bps
        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
        total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
        result['offered_tx_rate_bps'] = total_tx_bps

        result.update(self.get_theoretical_rates(avg_packet_size))

        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]

        # Merge HDRHistogram to have an overall value for all chains and ports
        # (provided that the histogram exists in the stats returned by T-Rex)
        # Of course, empty histograms will produce an empty (invalid) histogram.
        for chain_id, _ in enumerate(ifstats):
            for ph in self.port_handle:
                _, lat_pg_id = self.get_pg_id(ph, chain_id)
                    HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
        for pg_id in in_stats['latency']:
            if pg_id != 'global':
                    HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
            # fold all decoded histograms into one and re-encode for reporting
            decoded_hdrh = reduce(add_hdrh, hdrh_list)
            result["overall_hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flows stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                       'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'rx_bytes': {0: nan, 1: nan, 'total': nan},
                       'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                       'rx_pps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                       'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                       'tx_pps': {0: 0, 1: 0, 'total': 0}},
                   1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                       'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'rx_bytes': {0: nan, 1: nan, 'total': nan},
                       'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                       'rx_pps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                       'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                       'tx_pps': {0: 0, 1: 0, 'total': 0}},
                 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                       'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'rx_bytes': {0: nan, 1: nan, 'total': nan},
                       'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                       'rx_pps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                       'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                       'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
        # NOTE(review): this listing is missing some original lines (the NaN
        # guard inside get_latency, the try/except scaffolding around flow and
        # latency stat lookups, and the 'else:' of the isnan test).
        def get_latency(lval):
            return int(round(lval))

        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
                pg_stats = trex_stats['flow_stats'][pid]
                # counter transposition: see docstring above
                if_stats[port].tx += pg_stats['tx_pkts'][port]
                if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]

                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    # no latency packet was received: report zeros
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse TRex result dictionary and combines chosen latency stats.

        results is updated in place with min/max/avg delay in usec combined
        over all chains of the given port.

        example of latency dict returned by trex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 26.5,
                                      'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                      'histogram': {20: 303,
                    257: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 29.75,
                                      'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                      'histogram': {20: 261,
                    384: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 18.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                      'histogram': {20: 987, 30: 14},
                    385: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 19.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                      'histogram': {20: 989, 30: 11},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
        # NOTE(review): the initializations of total_max/average, the
        # try/except around missing pg_ids, and the reset of total_min to 0
        # are not visible in this listing.
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
            lat = in_stats['latency'][lat_pg_id]['latency']
            # dropped_pkts += lat['err_cntrs']['dropped']
            total_max = max(lat['total_max'], total_max)
            total_min = min(lat['total_min'], total_min)
            average += lat['average']
        # no latency sample at all: inf min would be meaningless
        if total_min == float("inf"):
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)
407 def _bind_vxlan(self):
408 bind_layers(UDP, VXLAN, dport=4789)
409 bind_layers(VXLAN, Ether)
    def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
        """Create a packet of given size.

        stream_cfg: stream configuration dict (MACs, IPs, ports, encapsulation)
        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        disable_random_latency_flow: when True, pin flow vars to a single flow
                                     (used for latency streams with random steps)
        """
        # NOTE(review): many original lines of this method are missing from
        # this listing (flow var keyword arguments such as name/size/op,
        # udp_args initialization, encap_level selection, several else:
        # branches and vm_param extensions). Visible statements kept unchanged.
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        if stream_cfg['vxlan'] is True:
            # outer (underlay) headers followed by the inner Ethernet frame
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                # s=0: not bottom of stack (an inner label may follow)
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                # s=1: bottom of MPLS label stack
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            # Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # plain (non-encapsulated) traffic
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = stream_cfg['udp_port_step']
            udp_args['sport_step'] = int(step)
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = stream_cfg['udp_port_step']
            udp_args['dport_step'] = int(step)
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])

        # STLVmTupleGen need flow count >= cores used by TRex, if FC < cores we used STLVmFlowVar
        if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
                stream_cfg['count'] >= self.generator_config.cores:
            src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
                                   ip_max=stream_cfg['ip_src_addr_max'],
                                   port_min=udp_args['sport'],
                                   port_max=udp_args['sport_max'],
                                   limit_flows=stream_cfg['count'])
            dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
                                   ip_max=stream_cfg['ip_dst_addr_max'],
                                   port_min=udp_args['dport'],
                                   port_max=udp_args['dport_max'],
                                   limit_flows=stream_cfg['count'])
                STLVmWrFlowVar(fv_name="tuple_src.ip",
                               pkt_offset="IP:{}.src".format(encap_level)),
                STLVmWrFlowVar(fv_name="tuple_src.port",
                               pkt_offset="UDP:{}.sport".format(encap_level)),
                STLVmWrFlowVar(fv_name="tuple_dst.ip",
                               pkt_offset="IP:{}.dst".format(encap_level)),
                STLVmWrFlowVar(fv_name="tuple_dst.port",
                               pkt_offset="UDP:{}.dport".format(encap_level)),
            if disable_random_latency_flow:
                # pin IP flow vars to a single value (min == max)
                src_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr'],
                dst_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr'],
            elif stream_cfg['ip_addrs_step'] == 'random':
                src_fv_ip = STLVmFlowVarRepeatableRandom(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_src_count'])
                dst_fv_ip = STLVmFlowVarRepeatableRandom(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_dst_count'])
                # sequential IP stepping
                src_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    step=stream_cfg['ip_addrs_step'])
                dst_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    step=stream_cfg['ip_addrs_step'])

            if disable_random_latency_flow:
                # pin UDP port flow vars to a single value (min == max)
                src_fv_port = STLVmFlowVar(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport'],
                dst_fv_port = STLVmFlowVar(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport'],
            elif stream_cfg['udp_port_step'] == 'random':
                src_fv_port = STLVmFlowVarRepeatableRandom(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_src_count'])
                dst_fv_port = STLVmFlowVarRepeatableRandom(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_dst_count'])
                # sequential UDP port stepping
                src_fv_port = STLVmFlowVar(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    step=udp_args['sport_step'])
                dst_fv_port = STLVmFlowVar(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    step=udp_args['dport_step'])
                STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
                STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
                STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
                STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
        # Use HW Offload to calculate the outer IP/UDP packet
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IP/UDP payload for VxLAN packets
            vm_param.append(STLVmFixIpv4(offset="IP:1"))
        # pad the frame up to the requested size (FCS excluded)
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
610 def _create_gratuitous_arp_pkt(self, stream_cfg):
611 """Create a GARP packet.
614 pkt_base = Ether(src=stream_cfg['mac_src'], dst="ff:ff:ff:ff:ff:ff")
616 if self.config.vxlan or self.config.mpls:
617 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
618 elif stream_cfg['vlan_tag'] is not None:
619 pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
621 pkt_base /= ARP(psrc=stream_cfg['ip_src_tg_gw'], hwsrc=stream_cfg['mac_src'],
622 hwdst=stream_cfg['mac_src'], pdst=stream_cfg['ip_src_tg_gw'])
624 return STLPktBuilder(pkt=pkt_base)
    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated to
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
        # NOTE(review): this listing is missing several original lines of this
        # method, including the continuation of the signature (the e2e
        # parameter and closing parenthesis), the streams list initialization
        # and some mode=/flow_stats= argument lines. Visible statements kept
        # unchanged.
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if l2frame == 'IMIX':
            # one continuous stream per IMIX frame size, weighted by ratio
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            # for IMIX, the latency packets have the average IMIX packet size
            if stream_cfg['ip_addrs_step'] == 'random' or \
                    stream_cfg['udp_port_step'] == 'random':
                # Force latency flow to only one flow to avoid creating flows
                # over requested flow count
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if self.config.periodic_gratuitous_arp:
                # reserve part of the requested rate for the GARP bursts
                requested_pps = int(utils.parse_rate_str(self.rates[0])[
                    'rate_pps']) - self.config.gratuitous_arp_pps
                    requested_pps -= self.LATENCY_PPS
                stltx_cont = STLTXCont(pps=requested_pps)
                stltx_cont = STLTXCont()
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                             if not self.config.no_flow_stats else None,
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # This only applies to the latency stream
            if stream_cfg['vlan_tag'] and l2frame_size < 68:
            if stream_cfg['ip_addrs_step'] == 'random' or \
                    stream_cfg['udp_port_step'] == 'random':
                # Force latency flow to only one flow to avoid creating flows
                # over requested flow count
                pkt = self._create_pkt(stream_cfg, l2frame_size, True)
                pkt = self._create_pkt(stream_cfg, l2frame_size)

        if self.config.no_latency_stats:
            LOG.info("Latency flow statistics are disabled.")
        if stream_cfg['vxlan'] is True:
            streams.append(STLStream(packet=pkt,
                                     flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                     if not self.config.no_latency_stats else None,
                                     mode=STLTXCont(pps=self.LATENCY_PPS)))
            streams.append(STLStream(packet=pkt,
                                     flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                     if not self.config.no_latency_stats else None,
                                     mode=STLTXCont(pps=self.LATENCY_PPS)))

        if self.config.periodic_gratuitous_arp and (
                self.config.l3_router or self.config.service_chain == ChainType.EXT):
            # In case of L3 router feature or EXT chain with router
            # and depending on ARP stale time SUT configuration
            # Gratuitous ARP from TG port to the router is needed to keep traffic up
            garp_pkt = self._create_gratuitous_arp_pkt(stream_cfg)
            # inter-burst gap in usec: one burst every 1/gratuitous_arp_pps second
            ibg = self.config.gratuitous_arp_pps * 1000000.0
            packets_count = int(self.config.duration_sec / self.config.gratuitous_arp_pps)
                STLStream(packet=garp_pkt,
                          mode=STLTXMultiBurst(pkts_per_burst=1, count=packets_count, ibg=ibg)))
740 def __connect(self, client):
    def __local_server_status(self):
        """ The TRex server may have started but failed initializing... and stopped.
        This piece of code is especially designed to address
        the case when a fatal failure occurs on a DPDK init call.
        The TRex algorithm should be revised to include some missing timeouts (?)

        Return status codes (visible in this listing):
        1: fatal error detected - should lead to exiting the run
        2: error detected that could be solved by starting again
        The diagnostic is based on parsing the local trex log file (improvable)
        """
        # NOTE(review): this listing is missing most of the original body
        # (variable initializations such as exited/error/cause/failure/message,
        # the 'line = _line.strip()' binding read by the tests below, the
        # assignments under each if/elif, and the final return). Visible
        # statements kept unchanged.
        with open('/tmp/trex.log', 'r', encoding="utf-8") as trex_log:
            for _line in trex_log:
                if line.startswith('Usage:'):
                if 'ports are bound' in line:
                if 'please wait' in line:
                if 'exit' in line.lower():
                elif 'cause' in line.lower():
                elif 'fail' in line.lower():
                elif 'msg' in line.lower():
                elif (error is not None) and line:
                elif line.startswith('Error:') or line.startswith('ERROR'):
        except FileNotFoundError:
        if exited is not None:
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
            if cause is not None:
                LOG.info("TRex [cont'd] %s", cause)
            if failure is not None:
                LOG.info("TRex [cont'd] %s", failure)
            if message is not None:
                LOG.info("TRex [cont'd] %s", message)
                if 'not supported yet' in message.lower():
                    LOG.info("TRex [cont'd] Try starting again!")
        elif error is not None:
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
            if after is not None:
                LOG.info("TRex [cont'd] %s", after)
            elif before is not None:
                LOG.info("TRex [cont'd] %s", before)
    def __connect_after_start(self):
        """Connect to a freshly started local TRex server, retrying while it initializes."""
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        # we try to capture recoverable error cases (checking status)
        # NOTE(review): this listing is missing the try:, the success path,
        # the raise on fatal status, and the sleep between retries.
        for it in range(self.config.generic_retry_count):
                self.client.connect()
            except Exception as ex:
                # on the last retry, inspect the local server log to diagnose
                if it == (self.config.generic_retry_count - 1):
                    status = self.__local_server_status()
                        # No need to wait anymore, something went wrong and TRex exited
                        LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
                        print("More information? Try the command: "
                              + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
                    # a new start will follow
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)
        # NOTE(review): the 'def connect(self):' line and the try: opening the
        # exception block below are not visible in this listing.
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    status = self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                status = self.__start_local_server()
                raise TrafficGeneratorException(e.message) from e

        # Workaround in case of a failed TRex server initialization
        # we try to start it again (twice maximum)
        # which may allow low level initialization to complete.
        if self.__start_local_server() == 2:
            self.__start_local_server()

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
        'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
        u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
        u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
        u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
        'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
        u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
        'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
        'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
        'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))
    def __start_local_server(self):
        """Start a local TRex server and connect to it; diagnose failures from the log."""
        # NOTE(review): this listing is missing the try:, the last_size
        # initialization and update, the sleep between polls, the log read into
        # 'message', and the final return of the connection status.
            LOG.info("Starting TRex ...")
            self.__start_server()
            status = self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                with open(logpath, 'r', encoding="utf-8") as f:
            raise TrafficGeneratorException(message) from e
922 def __start_server(self):
923 server = TRexTrafficServer()
924 server.run_server(self.generator_config)
926 def __check_config(self):
927 server = TRexTrafficServer()
928 return server.check_config_updated(self.generator_config)
        # NOTE(review): the 'def __restart(self):' line, the call stopping the
        # server, and the sleep/break inside the wait loop are not visible in
        # this listing.
        LOG.info("Restarting TRex ...")
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
        # Start and report a possible failure
        return self.__start_local_server()
    def __stop_server(self):
        """Release acquired ports and shut down a locally running TRex server.

        Only applies when TRex runs on localhost; a remote TRex is left running.
        """
        # NOTE(review): the try: before release() and the else: before the
        # remote-TRex message are not visible in this listing.
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
                self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
            LOG.info('Using remote TRex. Unable to stop TRex')
    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of list of dest macs indexed by port#
                the dest macs in the list are indexed by the chain id
        """
        # NOTE(review): this listing is missing several original lines
        # (arp_dest_macs initialization, the ServiceARP(...) call openers whose
        # keyword arguments appear below, the try: around ctx.run(), the
        # unresolved/dst_macs_count bookkeeping, the break statements and the
        # final return None on failure). Visible statements kept unchanged.
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count

            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                # resolve the (single) VTEP endpoint per chain
                           src_ip=device.vtep_src_ip,
                           dst_ip=device.vtep_dst_ip,
                           vlan=device.vtep_vlan)
                for cfg in stream_configs
                # resolve the gateway of each chain
                           src_ip=cfg['ip_src_tg_gw'],
                           dst_ip=cfg['mac_discovery_gw'],
                           # will be None if no vlan tagging
                           vlan=cfg['vlan_tag'])
                for cfg in stream_configs

            for attempt in range(self.config.generic_retry_count):
                    LOG.error(traceback.format_exc())
                for chain_id, mac in enumerate(dst_macs):
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     arp_record.dst_ip, arp_record.dst_mac)
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    LOG.info('Retrying ARP for: %s (retry %d/%d)',
                             unresolved, retry, self.config.generic_retry_count)
                    if retry < self.config.generic_retry_count:
                        time.sleep(self.config.generic_poll_sec)
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',

        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        #  | Disabling the service mode while a capture is in progress
        #  | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True.

        l2frame_size: L2 frame size (or IMIX) used to convert user rates to pps
        rates: list of user rate dicts (e.g. {'rate_pps': '10kpps'})
        bidirectional: True when traffic runs in both directions
        latency: True when latency streams will be generated
        return: a dict with a boolean 'result' key; when the check is performed
                it is the dict from utils.convert_rates() augmented with 'result'
        """
        # NOTE(review): several interior lines (the latency/bidirectional branch
        # headers and the trailing argument of convert_rates) are elided from
        # this view of the file — confirm the full structure against the repo.
        intf_speed = self.generator_config.intf_speed
        # bidirectional path: accumulate the pps equivalent of each direction's rate
        r = utils.convert_rates(l2frame_size, rate, intf_speed)
        total_rate += int(r['rate_pps'])
        # unidirectional path: single rate converted to pps
        r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
        total_rate = int(r['rate_pps'])
        # rate must be enough for latency stream and at least 1 pps for base stream per chain
        if self.config.periodic_gratuitous_arp:
            # gratuitous ARP adds its own per-chain pps budget on top
            required_rate = (self.LATENCY_PPS + 1 + self.config.gratuitous_arp_pps) \
                * self.config.service_chain_count * mult
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
        result = utils.convert_rates(l2frame_size,
                                     {'rate_pps': required_rate},
        # check passes when the user-provided aggregate rate meets the minimum
        result['result'] = total_rate >= required_rate
        # latency disabled: no minimum rate constraint applies
        return {'result': True}
    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        # NOTE(review): several interior lines are elided from this view of the
        # file (the condition guarding the raise, the streamblock dict creation,
        # the generate_streams() argument lists and an else branch) — comments
        # below are hedged accordingly.
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        # verify the requested rates leave room for latency/base streams per chain
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        raise TrafficGeneratorException(
            'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
            .format(pps=r['rate_pps'],
                    load=r['rate_percent']))
        self.l2_frame_size = l2frame_size
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
        for port in self.port_handle:
            streamblock[port] = []
        # one stream config list per device (one device per port)
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        if self.generator_config.ip_addrs_step == 'random' \
                or self.generator_config.gen_config.udp_port_step == 'random':
            LOG.warning("Using random step, the number of flows can be less than "
                        "the requested number of flows due to repeatable multivariate random "
                        "generation which can reproduce the same pattern of values")
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        # pair forward/reverse stream configs per chain
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            # forward direction streams go on port 0
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
            if len(self.rates) > 1:
                # reverse direction streams on port 1; latency only when bidirectional
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                    latency=bidirectional and latency,
        for port in self.port_handle:
            if self.config.vxlan:
                # enable VXLAN flow-stats dissection on the standard VXLAN port
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
    def clear_streamblock(self):
        """Clear all streams from TRex."""
        # NOTE(review): one line appears elided from this view between the
        # docstring and the reset call — confirm against the full file.
        # resetting the ports removes all programmed streams
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')
1131 def get_stats(self, ifstats=None):
1132 """Get stats from Trex."""
1133 stats = self.client.get_stats()
1134 return self.extract_stats(stats, ifstats)
1137 """Return the Trex local port MAC addresses.
1139 return: a list of MAC addresses indexed by the port#
1141 return [port['src_mac'] for port in self.port_info]
    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speed in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]
1150 def clear_stats(self):
1151 """Clear all stats in the traffic gneerator."""
1152 if self.port_handle:
1153 self.client.clear_stats()
1155 def start_traffic(self):
1156 """Start generating traffic in all ports."""
1157 for port, rate in zip(self.port_handle, self.rates):
1158 self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
1160 def stop_traffic(self):
1161 """Stop generating traffic."""
1162 self.client.stop(ports=self.port_handle)
    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        # NOTE(review): two lines appear elided from this view right after the
        # docstring (likely a guard on an existing capture) — confirm against
        # the full file.
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        # BPF filter keeps only frames destined to one of our own port MACs
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture \
            (rx_ports=self.port_handle, bpf_filter=bpf_filter)
    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        # NOTE(review): one line appears elided from this view before the
        # assignment below (likely a guard on self.capture_id) — confirm
        # against the full file.
        # fetched packets accumulate into self.packet_list
        self.packet_list = []
        self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                          output=self.packet_list)
    def stop_capture(self):
        """Stop capturing packets."""
        # NOTE(review): one line appears elided from this view before the call
        # below (likely a guard on self.capture_id) — confirm against the
        # full file.
        self.client.stop_capture(capture_id=self.capture_id['id'])
        self.capture_id = None
        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        # | Disabling the service mode while a capture is in progress
        # | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
1197 """Cleanup Trex driver."""
1200 self.client.reset(self.port_handle)
1201 self.client.disconnect()
1203 # TRex does not like a reset while in disconnected state
1206 def set_service_mode(self, enabled=True):
1207 """Enable/disable the 'service' mode."""
1208 self.client.set_service_mode(ports=self.port_handle, enabled=enabled)