# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Driver module for TRex traffic generator."""

import math
import os
import random
import time
import traceback
from functools import reduce

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

from hdrh.histogram import HdrHistogram

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFixIpv4
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmTupleGen
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS


class VXLAN(Packet):

    name = "VXLAN"
    # 32 flag names for the 32-bit flags field; bit 27 is the 'I' (VNI valid) flag
    _VXLAN_FLAGS = ['R'] * 27 + ['I'] + ['R'] * 4
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]

    def mysummary(self):
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")


class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.chain_count = self.generator_config.service_chain_count
        self.capture_id = None
        self.packet_list = []
        self.l2_frame_size = 0

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
        """
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
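
    # Illustration of the resulting pg_id layout (example values only):
    #   port=0, chain_id=5 -> pg_id = 0x005 (5),   lat_pg_id = 0x105 (261)
    #   port=1, chain_id=5 -> pg_id = 0x085 (133), lat_pg_id = 0x185 (389)
    # i.e. bit 7 encodes the port and bit 8 marks the latency stream of that chain.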

    def extract_stats(self, in_stats, ifstats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        """
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        # actual offered tx rate in bps
        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
        total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
        result['offered_tx_rate_bps'] = total_tx_bps

        result.update(self.get_theoretical_rates(avg_packet_size))

        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]

        # Merge HDRHistogram to have an overall value for all chains and ports
        # (provided that the histogram exists in the stats returned by T-Rex)
        # Of course, empty histograms will produce an empty (invalid) histogram.
        hdrh_list = []
        if ifstats:
            for chain_id, _ in enumerate(ifstats):
                for ph in self.port_handle:
                    _, lat_pg_id = self.get_pg_id(ph, chain_id)
                    hdrh_list.append(
                        HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
        else:
            for pg_id in in_stats['latency']:
                if pg_id != 'global':
                    hdrh_list.append(
                        HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
        decoded_hdrh = reduce(add_hdrh, hdrh_list)
        result["overall_hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
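        # A minimal sketch of the add_hdrh() helper used by reduce() above, assuming the
        # python hdrh API: merge the second histogram into the first and return it.
        #
        #     def add_hdrh(x, y):
        #         x.add(y)   # accumulate the counts of y into x
        #         return x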

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flow stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                             'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'rx_bytes': {0: nan, 1: nan, 'total': nan},
                             'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                             'rx_pps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                             'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                             'tx_pps': {0: 0, 1: 0, 'total': 0}}, etc...

        The pg_id (0, 1, 128, ...) is the key of the dict and is obtained using the
        get_pg_id() method.
        Packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way.
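
        Example using the dump above (chain_idx=0, so pg_id 0 on port 0 and 128 on port 1):
        tx_pkts_port(0) = flow_stats[0]['tx_pkts'][0]   = 15001
        rx_pkts_port(1) = flow_stats[0]['rx_pkts'][1]   = 15001
        tx_pkts_port(1) = flow_stats[128]['tx_pkts'][1] = 15001
        rx_pkts_port(0) = flow_stats[128]['rx_pkts'][0] = 15001
        """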
        def get_latency(lval):
            return int(round(lval))

        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
                pg_stats = trex_stats['flow_stats'][pid]
                if_stats[port].tx += pg_stats['tx_pkts'][port]
                if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]

            lat = trex_stats['latency'][lat_pg_id]['latency']
            # dropped_pkts += lat['err_cntrs']['dropped']
            latencies[port].max_usec = get_latency(lat['total_max'])
            if math.isnan(lat['total_min']):
                latencies[port].min_usec = 0
                latencies[port].avg_usec = 0
            else:
                latencies[port].min_usec = get_latency(lat['total_min'])
                latencies[port].avg_usec = get_latency(lat['average'])
            # pick up the HDR histogram if present (otherwise will raise KeyError)
            latencies[port].hdrh = lat['hdrh']

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine the chosen latency stats.

        Example of latency dict returned by trex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 26.5,
                                      'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                      'histogram': {20: 303,
                    257: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 29.75,
                                      'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                      'histogram': {20: 261,
                    384: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 18.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                      'histogram': {20: 987, 30: 14},
                    385: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 19.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                      'histogram': {20: 989, 30: 11},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
        average = 0
        total_max = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
            lat = in_stats['latency'][lat_pg_id]['latency']
            # dropped_pkts += lat['err_cntrs']['dropped']
            total_max = max(lat['total_max'], total_max)
            total_min = min(lat['total_min'], total_min)
            average += lat['average']

        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
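        # e.g. for a 64-byte L2 frame, scapy builds a 60-byte packet and TRex
        # appends the 4-byte FCS on the wire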
        if stream_cfg['vxlan'] is True:
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            # Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
425 pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
428 if stream_cfg['udp_src_port']:
429 udp_args['sport'] = int(stream_cfg['udp_src_port'])
430 if stream_cfg['udp_port_step'] == 'random':
433 step = stream_cfg['udp_port_step']
434 udp_args['sport_step'] = int(step)
435 udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
436 if stream_cfg['udp_dst_port']:
437 udp_args['dport'] = int(stream_cfg['udp_dst_port'])
438 if stream_cfg['udp_port_step'] == 'random':
441 step = stream_cfg['udp_port_step']
442 udp_args['dport_step'] = int(step)
443 udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
445 pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
446 UDP(dport=udp_args['dport'], sport=udp_args['sport'])
448 # STLVmTupleGen need flow count >= cores used by TRex, if FC < cores we used STLVmFlowVar
449 if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
450 stream_cfg['count'] >= self.generator_config.cores:
451 src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
452 ip_max=stream_cfg['ip_src_addr_max'],
453 port_min=udp_args['sport'],
454 port_max=udp_args['sport_max'],
456 limit_flows=stream_cfg['count'])
457 dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
458 ip_max=stream_cfg['ip_dst_addr_max'],
459 port_min=udp_args['dport'],
460 port_max=udp_args['dport_max'],
462 limit_flows=stream_cfg['count'])
465 STLVmWrFlowVar(fv_name="tuple_src.ip",
466 pkt_offset="IP:{}.src".format(encap_level)),
467 STLVmWrFlowVar(fv_name="tuple_src.port",
468 pkt_offset="UDP:{}.sport".format(encap_level)),
470 STLVmWrFlowVar(fv_name="tuple_dst.ip",
471 pkt_offset="IP:{}.dst".format(encap_level)),
472 STLVmWrFlowVar(fv_name="tuple_dst.port",
473 pkt_offset="UDP:{}.dport".format(encap_level)),
            if disable_random_latency_flow:
                src_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr'],
                dst_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr'],
            elif stream_cfg['ip_addrs_step'] == 'random':
                src_fv_ip = STLVmFlowVarRepeatableRandom(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_src_count'])
                dst_fv_ip = STLVmFlowVarRepeatableRandom(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_dst_count'])
            else:
                src_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    step=stream_cfg['ip_addrs_step'])
                dst_fv_ip = STLVmFlowVar(
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    step=stream_cfg['ip_addrs_step'])

            if disable_random_latency_flow:
                src_fv_port = STLVmFlowVar(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport'],
                dst_fv_port = STLVmFlowVar(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport'],
            elif stream_cfg['udp_port_step'] == 'random':
                src_fv_port = STLVmFlowVarRepeatableRandom(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_src_count'])
                dst_fv_port = STLVmFlowVarRepeatableRandom(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_dst_count'])
            else:
                src_fv_port = STLVmFlowVar(
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    step=udp_args['sport_step'])
                dst_fv_port = STLVmFlowVar(
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    step=udp_args['dport_step'])
                STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
                STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
                STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
                STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
        # Use HW offload to calculate the outer IP/UDP checksums
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IP/UDP payload for VxLAN packets
        if stream_cfg['vxlan'] is True:
            vm_param.append(STLVmFixIpv4(offset="IP:1"))
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            # for IMIX, the latency packets have the average IMIX packet size
            if stream_cfg['ip_addrs_step'] == 'random' or \
                    stream_cfg['udp_port_step'] == 'random':
                # Force latency flow to only one flow to avoid creating flows
                # over requested flow count
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
            else:
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats are disabled for MPLS for now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                             if not self.config.no_flow_stats else None,
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # This only applies to the latency stream
            if stream_cfg['vlan_tag'] and l2frame_size < 68:
                l2frame_size = 68
            if stream_cfg['ip_addrs_step'] == 'random' or \
                    stream_cfg['udp_port_step'] == 'random':
                # Force latency flow to only one flow to avoid creating flows
                # over requested flow count
                pkt = self._create_pkt(stream_cfg, l2frame_size, True)
            else:
                pkt = self._create_pkt(stream_cfg, l2frame_size)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    def __connect(self, client):
        client.connect()

    def __local_server_status(self):
        """The TRex server may have started but failed initializing... and stopped.

        This code is specifically designed to address the case
        where a fatal failure occurs during a DPDK init call.
        The TRex algorithm should be revised to include some missing timeouts (?)

        Return codes:
        0: no error detected
        1: fatal error detected - should lead to exiting the run
        2: error detected that could be solved by starting again
        The diagnostic is based on parsing the local trex log file (improvable)
        """
        try:
            with open('/tmp/trex.log', 'r') as trex_log:
                for _line in trex_log:
                    line = _line.strip()
                    if line.startswith('Usage:'):
                    if 'ports are bound' in line:
                    if 'please wait' in line:
                    if 'exit' in line.lower():
                    elif 'cause' in line.lower():
                    elif 'fail' in line.lower():
                    elif 'msg' in line.lower():
                    elif (error is not None) and line:
                    elif line.startswith('Error:') or line.startswith('ERROR'):
        except FileNotFoundError:
        if exited is not None:
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
            if cause is not None:
                LOG.info("TRex [cont'd] %s", cause)
            if failure is not None:
                LOG.info("TRex [cont'd] %s", failure)
            if message is not None:
                LOG.info("TRex [cont'd] %s", message)
                if 'not supported yet' in message.lower():
                    LOG.info("TRex [cont'd] Try starting again!")
        elif error is not None:
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
            if after is not None:
                LOG.info("TRex [cont'd] %s", after)
            elif before is not None:
                LOG.info("TRex [cont'd] %s", before)

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        # we try to capture recoverable error cases (checking status)
        for it in range(self.config.generic_retry_count):
            try:
                self.client.connect()
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                status = self.__local_server_status()
                    # No need to wait anymore, something went wrong and TRex exited
                    LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
                    print("More information? Try the command: "
                          + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
                    # a new start will follow
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)

    def connect(self):
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    status = self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                status = self.__start_local_server()
            else:
                raise TrafficGeneratorException(e.message) from e

            # Workaround in case of a failed TRex server initialization
            # we try to start it again (twice maximum)
            # which may allow low level initialization to complete.
            if self.__start_local_server() == 2:
                self.__start_local_server()
        ports = list(self.generator_config.ports)
        self.port_handle = ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            status = self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                with open(logpath, 'r') as f:
            raise TrafficGeneratorException(message) from e

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        # Start and report a possible failure
        return self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of lists of dest MACs indexed by port#
                the dest MACs in each list are indexed by chain id
        """
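        # Example of a successful return value (2 ports, 2 chains), with
        # illustrative MAC values only:
        #   {0: ['fa:16:3e:3c:63:04', 'fa:16:3e:50:16:3d'],
        #    1: ['fa:16:3e:a1:b2:c3', 'fa:16:3e:1e:2f:3a']}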
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count

            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                           src_ip=device.vtep_src_ip,
                           dst_ip=device.vtep_dst_ip,
                           vlan=device.vtep_vlan)
                        for cfg in stream_configs
                            src_ip=cfg['ip_src_tg_gw'],
                            dst_ip=cfg['mac_discovery_gw'],
                            # will be None if no vlan tagging
                            vlan=cfg['vlan_tag'])
                        for cfg in stream_configs

            for attempt in range(self.config.generic_retry_count):
                    LOG.error(traceback.format_exc())
                for chain_id, mac in enumerate(dst_macs):
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            LOG.info(' ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     arp_record.dst_ip, arp_record.dst_mac)
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    LOG.info('Retrying ARP for: %s (retry %d/%d)',
                             unresolved, retry, self.config.generic_retry_count)
                    if retry < self.config.generic_retry_count:
                        time.sleep(self.config.generic_poll_sec)
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',

        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        # | Disabling the service mode while a capture is in progress
        # | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
        intf_speed = self.generator_config.intf_speed
                r = utils.convert_rates(l2frame_size, rate, intf_speed)
                total_rate += int(r['rate_pps'])
            r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
            total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
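            # e.g. with 2 chains and bidirectional traffic (mult=2), the required rate is
            # 2 * 2 * (LATENCY_PPS + 1) pps in total (LATENCY_PPS is a class constant)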
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
            result['result'] = total_rate >= required_rate
            return result
        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        self.l2_frame_size = l2frame_size
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
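        # e.g. with 2 chains: fixed size -> 2 * 2 * 2 = 8 streams in total (4 per port),
        #      IMIX -> 2 * 2 * 4 = 16 streams in total (8 per port)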
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        if self.generator_config.ip_addrs_step == 'random' \
                or self.generator_config.gen_config.udp_port_step == 'random':
            LOG.warning("Using random step, the number of flows can be less than "
                        "the requested number of flows due to repeatable multivariate random "
                        "generation which can reproduce the same pattern of values")
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            latency=bidirectional and latency,

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self, if_stats=None):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats, if_stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speeds in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture(rx_ports=self.port_handle,
                                                    bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        self.packet_list = []
        self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                          output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        self.client.stop_capture(capture_id=self.capture_id['id'])
        self.capture_id = None
        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        # | Disabling the service mode while a capture is in progress
        # | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service' mode."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)