1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 """Driver module for TRex traffic generator."""
22 from functools import reduce
24 from itertools import count
25 # pylint: disable=import-error
26 from scapy.contrib.mpls import MPLS # flake8: noqa
27 # pylint: enable=import-error
28 from nfvbench.log import LOG
29 from nfvbench.traffic_server import TRexTrafficServer
30 from nfvbench.utils import cast_integer
31 from nfvbench.utils import timeout
32 from nfvbench.utils import TimeoutError
34 from hdrh.histogram import HdrHistogram
36 # pylint: disable=import-error
37 from trex.common.services.trex_service_arp import ServiceARP
38 from trex.stl.api import bind_layers
39 from trex.stl.api import CTRexVmInsFixHwCs
40 from trex.stl.api import Dot1Q
41 from trex.stl.api import Ether
42 from trex.stl.api import FlagsField
43 from trex.stl.api import IP
44 from trex.stl.api import Packet
45 from trex.stl.api import STLClient
46 from trex.stl.api import STLError
47 from trex.stl.api import STLFlowLatencyStats
48 from trex.stl.api import STLFlowStats
49 from trex.stl.api import STLPktBuilder
50 from trex.stl.api import STLScVmRaw
51 from trex.stl.api import STLStream
52 from trex.stl.api import STLTXCont
53 from trex.stl.api import STLVmFixChecksumHw
54 from trex.stl.api import STLVmFixIpv4
55 from trex.stl.api import STLVmFlowVar
56 from trex.stl.api import STLVmFlowVarRepeatableRandom
57 from trex.stl.api import STLVmTupleGen
58 from trex.stl.api import STLVmWrFlowVar
59 from trex.stl.api import ThreeBytesField
60 from trex.stl.api import UDP
61 from trex.stl.api import XByteField
63 # pylint: enable=import-error
65 from .traffic_base import AbstractTrafficGenerator
66 from .traffic_base import TrafficGeneratorException
67 from . import traffic_utils as utils
68 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
69 from .traffic_utils import IMIX_L2_SIZES
70 from .traffic_utils import IMIX_RATIOS
# NOTE(review): fragment of a scapy Packet subclass describing the VXLAN
# header; the class statement itself is elided from this excerpt.
75 _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
# Header layout: 32-bit flags word (default 0x08000000 sets the VNI-valid "I"
# bit), 24-bit VNI, one reserved byte -- presumably per RFC 7348; TODO confirm
# against the elided class definition.
77 fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
78 ThreeBytesField("vni", 0),
79 XByteField("reserved", 0x00)]
# One-line packet summary used by scapy's summary/show helpers.
83 return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
85 class TRex(AbstractTrafficGenerator):
86 """TRex traffic generator driver."""
# Bit layout of a packet group id (see get_pg_id below): the low bits carry
# the chain id, bit 0x0080 encodes the port, bit 0x0100 marks the latency
# stream variant of the same port/chain stream.
89 CHAIN_PG_ID_MASK = 0x007F
90 PORT_PG_ID_MASK = 0x0080
91 LATENCY_PG_ID_MASK = 0x0100
93 def __init__(self, traffic_client):
# Base class presumably wires in self.config/self.generator_config;
# several __init__ lines are elided from this excerpt -- verify there.
95 AbstractTrafficGenerator.__init__(self, traffic_client)
99 self.chain_count = self.generator_config.service_chain_count
# id dict of an in-progress TRex capture; None when no capture is active
101 self.capture_id = None
# packets pulled back by fetch_capture_packets()
102 self.packet_list = []
# L2 frame size (or 'IMIX') of the last programmed traffic, used by
# extract_stats() for rate conversions
103 self.l2_frame_size = 0
105 def get_version(self):
106 """Get the Trex version."""
# Returns '' when connect() has not yet created a client.
107 return self.client.get_server_version() if self.client else ''
109 def get_pg_id(self, port, chain_id):
110 """Calculate the packet group IDs to use for a given port/stream type/chain_id.
113 chain_id: identifies to which chain the pg_id is associated (0 to 255)
114 return: pg_id, lat_pg_id
116 We use a bit mask to set up the 3 fields:
117 0x007F: chain ID (8 bits for a max of 128 chains)
# NOTE(review): 0x007F is 7 bits (128 values); the "8 bits" wording above is
# off by one -- the 128-chain limit is what the masks actually allow.
121 pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
# The latency stream uses the same pg_id with the latency marker bit set.
122 return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
124 def extract_stats(self, in_stats, ifstats):
125 """Extract stats from dict returned by Trex API.
127 :param in_stats: dict as returned by TRex api
# Replace NaN values in-place before reading counters.
129 utils.nan_replace(in_stats)
130 # LOG.debug(in_stats)
133 # port_handles should have only 2 elements: [0, 1]
134 # so (1 - ph) will be the index for the far end port
135 for ph in self.port_handle:
137 far_end_stats = in_stats[1 - ph]
# tx counters are read straight from this port's TRex counters
140 'total_pkts': cast_integer(stats['opackets']),
141 'total_pkt_bytes': cast_integer(stats['obytes']),
142 'pkt_rate': cast_integer(stats['tx_pps']),
143 'pkt_bit_rate': cast_integer(stats['tx_bps'])
# rx counters; drops are derived from the far-end tx counter below
146 'total_pkts': cast_integer(stats['ipackets']),
147 'total_pkt_bytes': cast_integer(stats['ibytes']),
148 'pkt_rate': cast_integer(stats['rx_pps']),
149 'pkt_bit_rate': cast_integer(stats['rx_bps']),
150 # how many pkts were dropped in RX direction
151 # need to take the tx counter on the far end port
152 'dropped_pkts': cast_integer(
153 far_end_stats['opackets'] - stats['ipackets'])
156 self.__combine_latencies(in_stats, result[ph]['rx'], ph)
# Overall offered tx rate: pps over the run duration, then converted to bps
# using the average packet size of the programmed frame profile.
158 total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
159 result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
160 # actual offered tx rate in bps
161 avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
162 total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
163 result['offered_tx_rate_bps'] = total_tx_bps
165 result.update(self.get_theoretical_rates(avg_packet_size))
167 result["flow_stats"] = in_stats["flow_stats"]
168 result["latency"] = in_stats["latency"]
170 # Merge HDRHistogram to have an overall value for all chains and ports
# With per-interface stats, collect one histogram per chain/port latency
# stream; otherwise walk every latency pg_id except the 'global' entry.
174 for chain_id, _ in enumerate(ifstats):
175 for ph in self.port_handle:
176 _, lat_pg_id = self.get_pg_id(ph, chain_id)
178 HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
180 for pg_id in in_stats['latency']:
181 if pg_id != 'global':
183 HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
# NOTE(review): add_hdrh is defined on elided lines; presumably it folds two
# decoded histograms into one -- verify in the full source.
188 decoded_hdrh = reduce(add_hdrh, hdrh_list)
189 result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
195 def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
196 """Extract the aggregated stats for a given chain.
198 trex_stats: stats as returned by get_stats()
199 if_stats: a list of 2 interface stats to update (port 0 and 1)
200 latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
201 latencies[p] is the latency for packets sent on port p
202 if there are no latency streams, the Latency instances are not modified
203 chain_idx: chain index of the interface stats
205 The packet counts include normal and latency streams.
207 Trex returns flows stats as follows:
209 'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
210 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
211 'rx_bytes': {0: nan, 1: nan, 'total': nan},
212 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
213 'rx_pps': {0: 0, 1: 0, 'total': 0},
214 'tx_bps': {0: 0, 1: 0, 'total': 0},
215 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
216 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
217 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
218 'tx_pps': {0: 0, 1: 0, 'total': 0}},
219 1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
220 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
221 'rx_bytes': {0: nan, 1: nan, 'total': nan},
222 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
223 'rx_pps': {0: 0, 1: 0, 'total': 0},
224 'tx_bps': {0: 0, 1: 0, 'total': 0},
225 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
226 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
227 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
228 'tx_pps': {0: 0, 1: 0, 'total': 0}},
229 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
230 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
231 'rx_bytes': {0: nan, 1: nan, 'total': nan},
232 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
233 'rx_pps': {0: 0, 1: 0, 'total': 0},
234 'tx_bps': {0: 0, 1: 0, 'total': 0},
235 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
236 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
237 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
238 'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...
240 the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
242 packet counters for a given stream sent on port p are reported as:
243 - tx_pkts[p] on port p
244 - rx_pkts[1-p] on the far end port
246 This is a tricky/critical counter transposition operation because
247 the results are grouped by port (not by stream):
248 tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
249 rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
250 tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
251 rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
253 or using a more generic formula:
254 tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
255 rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
257 the second formula is equivalent to
258 rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
260 If there are latency streams, those same counters need to be added in the same way
# Round a float latency (usec) to the nearest int.
262 def get_latency(lval):
264 return int(round(lval))
# Counter transposition described in the docstring: accumulate both the
# normal and the latency stream pg_ids for this chain, on both ports.
270 for port in range(2):
271 pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
272 for pid in [pg_id, lat_pg_id]:
274 pg_stats = trex_stats['flow_stats'][pid]
275 if_stats[port].tx += pg_stats['tx_pkts'][port]
276 if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
280 lat = trex_stats['latency'][lat_pg_id]['latency']
281 # dropped_pkts += lat['err_cntrs']['dropped']
282 latencies[port].max_usec = get_latency(lat['total_max'])
# total_min is NaN when no latency packet was received at all
283 if math.isnan(lat['total_min']):
284 latencies[port].min_usec = 0
285 latencies[port].avg_usec = 0
287 latencies[port].min_usec = get_latency(lat['total_min'])
288 latencies[port].avg_usec = get_latency(lat['average'])
289 # pick up the HDR histogram if present (otherwise will raise KeyError)
290 latencies[port].hdrh = lat['hdrh']
294 def __combine_latencies(self, in_stats, results, port_handle):
295 """Traverse TRex result dictionary and combines chosen latency stats.
297 example of latency dict returned by trex (2 chains):
298 'latency': {256: {'err_cntrs': {'dropped': 0,
303 'latency': {'average': 26.5,
304 'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
305 'histogram': {20: 303,
315 257: {'err_cntrs': {'dropped': 0,
320 'latency': {'average': 29.75,
321 'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
322 'histogram': {20: 261,
331 384: {'err_cntrs': {'dropped': 0,
336 'latency': {'average': 18.0,
337 'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
338 'histogram': {20: 987, 30: 14},
343 385: {'err_cntrs': {'dropped': 0,
348 'latency': {'average': 19.0,
349 'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
350 'histogram': {20: 989, 30: 11},
355 'global': {'bad_hdr': 0, 'old_flow': 0}},
# NOTE(review): initialization of total_max/average is on elided lines;
# min starts at +inf so that any real sample replaces it.
359 total_min = float("inf")
360 for chain_id in range(self.chain_count):
362 _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
363 lat = in_stats['latency'][lat_pg_id]['latency']
364 # dropped_pkts += lat['err_cntrs']['dropped']
365 total_max = max(lat['total_max'], total_max)
366 total_min = min(lat['total_min'], total_min)
367 average += lat['average']
# min stayed at +inf => no latency sample was seen on any chain
370 if total_min == float("inf"):
372 results['min_delay_usec'] = total_min
373 results['max_delay_usec'] = total_max
# arithmetic mean of the per-chain averages
374 results['avg_delay_usec'] = int(average / self.chain_count)
376 def _bind_vxlan(self):
# Teach scapy how to dissect VXLAN: UDP dport 4789 -> VXLAN -> inner Ethernet.
377 bind_layers(UDP, VXLAN, dport=4789)
378 bind_layers(VXLAN, Ether)
380 def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
381 """Create a packet of given size.
383 l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
385 # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
386 frame_size = int(l2frame_size) - 4
# --- VXLAN encapsulation: outer Ether/[Dot1Q]/IP/UDP/VXLAN + inner Ether ---
388 if stream_cfg['vxlan'] is True:
391 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
392 if stream_cfg['vtep_vlan'] is not None:
393 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
394 pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
395 pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
396 pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
397 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
398 # need to randomize the outer header UDP src port based on flow
399 vxlan_udp_src_fv = STLVmFlowVar(
400 name="vxlan_udp_src",
405 vm_param = [vxlan_udp_src_fv,
406 STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
# --- MPLS encapsulation: outer Ether/[Dot1Q]/[outer label]/[inner label] ---
407 elif stream_cfg['mpls'] is True:
409 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
410 if stream_cfg['vtep_vlan'] is not None:
411 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
# s=0 on the outer label (not bottom of stack), s=1 on the inner one.
412 if stream_cfg['mpls_outer_label'] is not None:
413 pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
414 if stream_cfg['mpls_inner_label'] is not None:
415 pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
416 # Flow stats and MPLS labels randomization TBD
417 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
# --- plain L2 frame, optionally VLAN tagged ---
420 pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
422 if stream_cfg['vlan_tag'] is not None:
423 pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
# Build the UDP src/dst port arguments; for a non-random step, the step and
# max values are taken from the stream config.
426 if stream_cfg['udp_src_port']:
427 udp_args['sport'] = int(stream_cfg['udp_src_port'])
428 if stream_cfg['udp_port_step'] == 'random':
431 step = stream_cfg['udp_port_step']
432 udp_args['sport_step'] = int(step)
433 udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
434 if stream_cfg['udp_dst_port']:
435 udp_args['dport'] = int(stream_cfg['udp_dst_port'])
436 if stream_cfg['udp_port_step'] == 'random':
439 step = stream_cfg['udp_port_step']
440 udp_args['dport_step'] = int(step)
441 udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
443 pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
444 UDP(dport=udp_args['dport'], sport=udp_args['sport'])
446 # STLVmTupleGen need flow count >= cores used by TRex, if FC < cores we used STLVmFlowVar
447 if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
448 stream_cfg['count'] >= self.generator_config.cores:
# Fast path: one tuple generator per direction covers IP + port together.
449 src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
450 ip_max=stream_cfg['ip_src_addr_max'],
451 port_min=udp_args['sport'],
452 port_max=udp_args['sport_max'],
454 limit_flows=stream_cfg['count'])
455 dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
456 ip_max=stream_cfg['ip_dst_addr_max'],
457 port_min=udp_args['dport'],
458 port_max=udp_args['dport_max'],
460 limit_flows=stream_cfg['count'])
# encap_level selects the inner header for VXLAN (IP:1/UDP:1) vs outer
# (IP:0/UDP:0) -- its assignment is on elided lines; verify in full source.
463 STLVmWrFlowVar(fv_name="tuple_src.ip",
464 pkt_offset="IP:{}.src".format(encap_level)),
465 STLVmWrFlowVar(fv_name="tuple_src.port",
466 pkt_offset="UDP:{}.sport".format(encap_level)),
468 STLVmWrFlowVar(fv_name="tuple_dst.ip",
469 pkt_offset="IP:{}.dst".format(encap_level)),
470 STLVmWrFlowVar(fv_name="tuple_dst.port",
471 pkt_offset="UDP:{}.dport".format(encap_level)),
# Latency stream with random stepping: pin min == max so the latency flow
# does not consume extra flows beyond the requested count.
474 if disable_random_latency_flow:
475 src_fv_ip = STLVmFlowVar(
477 min_value=stream_cfg['ip_src_addr'],
478 max_value=stream_cfg['ip_src_addr'],
480 dst_fv_ip = STLVmFlowVar(
482 min_value=stream_cfg['ip_dst_addr'],
483 max_value=stream_cfg['ip_dst_addr'],
485 elif stream_cfg['ip_addrs_step'] == 'random':
486 src_fv_ip = STLVmFlowVarRepeatableRandom(
488 min_value=stream_cfg['ip_src_addr'],
489 max_value=stream_cfg['ip_src_addr_max'],
491 seed=random.randint(0, 32767),
492 limit=stream_cfg['ip_src_count'])
493 dst_fv_ip = STLVmFlowVarRepeatableRandom(
495 min_value=stream_cfg['ip_dst_addr'],
496 max_value=stream_cfg['ip_dst_addr_max'],
498 seed=random.randint(0, 32767),
499 limit=stream_cfg['ip_dst_count'])
501 src_fv_ip = STLVmFlowVar(
503 min_value=stream_cfg['ip_src_addr'],
504 max_value=stream_cfg['ip_src_addr_max'],
507 step=stream_cfg['ip_addrs_step'])
508 dst_fv_ip = STLVmFlowVar(
510 min_value=stream_cfg['ip_dst_addr'],
511 max_value=stream_cfg['ip_dst_addr_max'],
514 step=stream_cfg['ip_addrs_step'])
# Same three-way choice (pinned / repeatable-random / stepped) for ports.
516 if disable_random_latency_flow:
517 src_fv_port = STLVmFlowVar(
519 min_value=udp_args['sport'],
520 max_value=udp_args['sport'],
522 dst_fv_port = STLVmFlowVar(
524 min_value=udp_args['dport'],
525 max_value=udp_args['dport'],
527 elif stream_cfg['udp_port_step'] == 'random':
528 src_fv_port = STLVmFlowVarRepeatableRandom(
530 min_value=udp_args['sport'],
531 max_value=udp_args['sport_max'],
533 seed=random.randint(0, 32767),
534 limit=stream_cfg['udp_src_count'])
535 dst_fv_port = STLVmFlowVarRepeatableRandom(
537 min_value=udp_args['dport'],
538 max_value=udp_args['dport_max'],
540 seed=random.randint(0, 32767),
541 limit=stream_cfg['udp_dst_count'])
543 src_fv_port = STLVmFlowVar(
545 min_value=udp_args['sport'],
546 max_value=udp_args['sport_max'],
549 step=udp_args['sport_step'])
550 dst_fv_port = STLVmFlowVar(
552 min_value=udp_args['dport'],
553 max_value=udp_args['dport_max'],
556 step=udp_args['dport_step'])
559 STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
561 STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
563 STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
565 STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
567 # Use HW Offload to calculate the outter IP/UDP packet
568 vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
570 l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
571 # Use software to fix the inner IP/UDP payload for VxLAN packets
573 vm_param.append(STLVmFixIpv4(offset="IP:1"))
# Pad with 'x' bytes up to the requested frame size (FCS excluded).
574 pad = max(0, frame_size - len(pkt_base)) * 'x'
576 return STLPktBuilder(pkt=pkt_base / pad,
577 vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
579 def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
581 """Create a list of streams corresponding to a given chain and stream config.
583 port: port where the streams originate (0 or 1)
584 chain_id: the chain to which the streams are associated to
585 stream_cfg: stream configuration
586 l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
587 latency: if True also create a latency stream
588 e2e: True if performing "end to end" connectivity check
591 pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
592 if self.config.no_flow_stats:
593 LOG.info("Traffic flow statistics are disabled.")
# IMIX: one continuous stream per IMIX size, weighted by the IMIX ratio.
594 if l2frame == 'IMIX':
595 for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
596 pkt = self._create_pkt(stream_cfg, l2_frame_size)
# e2e/MPLS streams carry no flow stats (see comments below).
597 if e2e or stream_cfg['mpls']:
598 streams.append(STLStream(packet=pkt,
599 mode=STLTXCont(pps=ratio)))
601 if stream_cfg['vxlan'] is True:
602 streams.append(STLStream(packet=pkt,
603 flow_stats=STLFlowStats(pg_id=pg_id,
605 if not self.config.no_flow_stats else None,
606 mode=STLTXCont(pps=ratio)))
608 streams.append(STLStream(packet=pkt,
609 flow_stats=STLFlowStats(pg_id=pg_id)
610 if not self.config.no_flow_stats else None,
611 mode=STLTXCont(pps=ratio)))
614 # for IMIX, the latency packets have the average IMIX packet size
615 if stream_cfg['ip_addrs_step'] == 'random' or \
616 stream_cfg['udp_port_step'] == 'random':
617 # Force latency flow to only one flow to avoid creating flows
618 # over requested flow count
619 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
621 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
# Fixed frame size: one continuous stream (+ optional latency stream).
624 l2frame_size = int(l2frame)
625 pkt = self._create_pkt(stream_cfg, l2frame_size)
626 if e2e or stream_cfg['mpls']:
627 streams.append(STLStream(packet=pkt,
628 # Flow stats is disabled for MPLS now
629 # flow_stats=STLFlowStats(pg_id=pg_id),
632 if stream_cfg['vxlan'] is True:
633 streams.append(STLStream(packet=pkt,
634 flow_stats=STLFlowStats(pg_id=pg_id,
636 if not self.config.no_flow_stats else None,
639 streams.append(STLStream(packet=pkt,
640 flow_stats=STLFlowStats(pg_id=pg_id)
641 if not self.config.no_flow_stats else None,
643 # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
644 # without vlan, the min l2 frame size is 64
646 # This only applies to the latency stream
648 if stream_cfg['vlan_tag'] and l2frame_size < 68:
650 if stream_cfg['ip_addrs_step'] == 'random' or \
651 stream_cfg['udp_port_step'] == 'random':
652 # Force latency flow to only one flow to avoid creating flows
653 # over requested flow count
654 pkt = self._create_pkt(stream_cfg, l2frame_size, True)
656 pkt = self._create_pkt(stream_cfg, l2frame_size)
# Latency stream is tagged with the lat_pg_id and runs at LATENCY_PPS.
659 if self.config.no_latency_stats:
660 LOG.info("Latency flow statistics are disabled.")
661 if stream_cfg['vxlan'] is True:
662 streams.append(STLStream(packet=pkt,
663 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
665 if not self.config.no_latency_stats else None,
666 mode=STLTXCont(pps=self.LATENCY_PPS)))
668 streams.append(STLStream(packet=pkt,
669 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
670 if not self.config.no_latency_stats else None,
671 mode=STLTXCont(pps=self.LATENCY_PPS)))
675 def __connect(self, client):
678 def __local_server_status(self):
679 """ The TRex server may have started but failed initializing... and stopped.
680 This piece of code is especially designed to address
681 the case when a fatal failure occurs on a DPDK init call.
682 The TRex algorithm should be revised to include some missing timeouts (?)
685 1: fatal error detected - should lead to exiting the run
686 2: error detected that could be solved by starting again
687 The diagnostic is based on parsing the local trex log file (improvable)
# Scan the local TRex log line by line, remembering the most relevant
# exit/cause/failure/error snippets (assignments are on elided lines).
699 with open('/tmp/trex.log', 'r') as trex_log:
700 for _line in trex_log:
702 if line.startswith('Usage:'):
704 if 'ports are bound' in line:
706 if 'please wait' in line:
708 if 'exit' in line.lower():
710 elif 'cause' in line.lower():
712 elif 'fail' in line.lower():
714 elif 'msg' in line.lower():
716 elif (error is not None) and line:
718 elif line.startswith('Error:') or line.startswith('ERROR'):
# Log file absent: server presumably never wrote anything (handled on
# elided lines).
722 except FileNotFoundError:
# Report the diagnosis; a 'not supported yet' message is treated as
# recoverable (status 2 -> caller restarts the server).
724 if exited is not None:
726 LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
727 if cause is not None:
728 LOG.info("TRex [cont'd] %s", cause)
729 if failure is not None:
730 LOG.info("TRex [cont'd] %s", failure)
731 if message is not None:
732 LOG.info("TRex [cont'd] %s", message)
733 if 'not supported yet' in message.lower():
734 LOG.info("TRex [cont'd] Try starting again!")
736 elif error is not None:
738 LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
739 if after is not None:
740 LOG.info("TRex [cont'd] %s", after)
741 elif before is not None:
742 LOG.info("TRex [cont'd] %s", before)
745 def __connect_after_start(self):
746 # after start, Trex may take a bit of time to initialize
747 # so we need to retry a few times
748 # we try to capture recoverable error cases (checking status)
750 for it in range(self.config.generic_retry_count):
# try/except wrapper lines are elided; connect() raising drives the
# retry/diagnosis path below.
753 self.client.connect()
755 except Exception as ex:
756 if it == (self.config.generic_retry_count - 1):
# Ask the log-file diagnosis whether TRex died (fatal) or can be
# restarted (status meaning documented in __local_server_status).
758 status = self.__local_server_status()
760 # No need to wait anymore, something went wrong and TRex exited
762 LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
763 print("More information? Try the command: "
764 + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
767 # a new start will follow
769 LOG.info("Retrying connection to TRex (%s)...", ex.msg)
773 """Connect to the TRex server."""
775 server_ip = self.generator_config.ip
776 LOG.info("Connecting to TRex (%s)...", server_ip)
778 # Connect to TRex server
779 self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
780 async_port=self.generator_config.zmq_pub_port)
# try wrapper lines are elided; a local server (127.0.0.1) gets its config
# checked and may be restarted, a remote failure is fatal.
782 self.__connect(self.client)
783 if server_ip == '127.0.0.1':
784 config_updated = self.__check_config()
785 if config_updated or self.config.restart:
786 status = self.__restart()
787 except (TimeoutError, STLError) as e:
788 if server_ip == '127.0.0.1':
789 status = self.__start_local_server()
791 raise TrafficGeneratorException(e.message) from e
794 # Workaround in case of a failed TRex server initialization
795 # we try to start it again (twice maximum)
796 # which may allow low level initialization to complete.
797 if self.__start_local_server() == 2:
798 self.__start_local_server()
800 ports = list(self.generator_config.ports)
801 self.port_handle = ports
# Acquire the ports and reset them to a clean state.
803 self.client.reset(ports)
804 # Read HW information from each port
805 # this returns an array of dict (1 per port)
807 Example of output for Intel XL710
808 [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
809 'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
810 u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
811 u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
812 u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
813 'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
814 u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
815 'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
816 'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
817 'layer_mode': 'Ethernet', u'numa': 0}, ...]
819 self.port_info = self.client.get_port_info(ports)
820 LOG.info('Connected to TRex')
821 for id, port in enumerate(self.port_info):
822 LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
823 id, port['description'], port['speed'], port['src_mac'],
824 port['pci_addr'], port['driver'])
825 # Make sure the 2 ports have the same speed
826 if self.port_info[0]['speed'] != self.port_info[1]['speed']:
827 raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
828 (self.port_info[0]['speed'],
829 self.port_info[1]['speed']))
831 def __start_local_server(self):
# Launch a local TRex server process and connect to it; on failure, try to
# surface the tail of the TRex log in the raised exception.
833 LOG.info("Starting TRex ...")
834 self.__start_server()
835 status = self.__connect_after_start()
836 except (TimeoutError, STLError) as e:
837 LOG.error('Cannot connect to TRex')
838 LOG.error(traceback.format_exc())
839 logpath = '/tmp/trex.log'
840 if os.path.isfile(logpath):
841 # Wait for TRex to finish writing error message
843 for _ in range(self.config.generic_retry_count):
844 size = os.path.getsize(logpath)
845 if size == last_size:
846 # probably not writing anymore
850 with open(logpath, 'r') as f:
# 'message' is built from the log tail on elided lines.
854 raise TrafficGeneratorException(message) from e
857 def __start_server(self):
# Spawn the local TRex server process configured from generator_config.
858 server = TRexTrafficServer()
859 server.run_server(self.generator_config)
861 def __check_config(self):
# True if the on-disk TRex config no longer matches generator_config
# (caller then restarts the server to pick up the new config).
862 server = TRexTrafficServer()
863 return server.check_config_updated(self.generator_config)
# NOTE(review): the def line of __restart() is elided from this excerpt.
866 LOG.info("Restarting TRex ...")
# __stop_server() call is presumably on an elided line before this poll loop.
868 # Wait for server stopped
869 for _ in range(self.config.generic_retry_count):
871 if not self.client.is_connected():
872 LOG.info("TRex is stopped...")
874 # Start and report a possible failure
875 return self.__start_local_server()
877 def __stop_server(self):
# Only a local server can be shut down; for a remote server we just log.
878 if self.generator_config.ip == '127.0.0.1':
879 ports = self.client.get_acquired_ports()
880 LOG.info('Release ports %s and stopping TRex...', ports)
# try wrapper lines are elided; shutdown errors are logged, not raised.
883 self.client.release(ports=ports)
884 self.client.server_shutdown()
885 except STLError as e:
886 LOG.warning('Unable to stop TRex. Error: %s', e)
888 LOG.info('Using remote TRex. Unable to stop TRex')
890 def resolve_arp(self):
891 """Resolve all configured remote IP addresses.
893 return: None if ARP failed to resolve for all IP addresses
894 else a dict of list of dest macs indexed by port#
895 the dest macs in the list are indexed by the chain id
# ARP requires service mode on the ports.
897 self.client.set_service_mode(ports=self.port_handle)
898 LOG.info('Polling ARP until successful...')
900 for port, device in zip(self.port_handle, self.generator_config.devices):
901 # there should be 1 stream config per chain
902 stream_configs = device.get_stream_configs()
903 chain_count = len(stream_configs)
904 ctx = self.client.create_service_ctx(port=port)
905 # all dest macs on this port indexed by chain ID
906 dst_macs = [None] * chain_count
908 # the index in the list is the chain id
# Build one ServiceARP per chain: for VXLAN/MPLS resolve the VTEP
# underlay addresses, otherwise the per-chain gateway addresses.
909 if self.config.vxlan or self.config.mpls:
912 src_ip=device.vtep_src_ip,
913 dst_ip=device.vtep_dst_ip,
914 vlan=device.vtep_vlan)
915 for cfg in stream_configs
920 src_ip=cfg['ip_src_tg_gw'],
921 dst_ip=cfg['mac_discovery_gw'],
922 # will be None if no vlan tagging
923 vlan=cfg['vlan_tag'])
924 for cfg in stream_configs
# Retry loop: run the ARP services, harvest resolved MACs, and retry
# the unresolved ones until all chains resolve or retries run out.
927 for attempt in range(self.config.generic_retry_count):
931 LOG.error(traceback.format_exc())
935 for chain_id, mac in enumerate(dst_macs):
937 arp_record = arps[chain_id].get_record()
938 if arp_record.dst_mac:
939 dst_macs[chain_id] = arp_record.dst_mac
941 LOG.info(' ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
944 arp_record.dst_ip, arp_record.dst_mac)
946 unresolved.append(arp_record.dst_ip)
947 if dst_macs_count == chain_count:
948 arp_dest_macs[port] = dst_macs
949 LOG.info('ARP resolved successfully for port %s', port)
953 LOG.info('Retrying ARP for: %s (retry %d/%d)',
954 unresolved, retry, self.config.generic_retry_count)
955 if retry < self.config.generic_retry_count:
956 time.sleep(self.config.generic_poll_sec)
958 LOG.error('ARP timed out for port %s (resolved %d out of %d)',
964 # A traffic capture may have been started (from a T-Rex console) at this time.
965 # If asked so, we keep the service mode enabled here, and disable it otherwise.
966 # | Disabling the service mode while a capture is in progress
967 # | would cause the application to stop/crash with an error.
968 if not self.config.service_mode:
969 self.client.set_service_mode(ports=self.port_handle, enabled=False)
# Success only if every port resolved all of its chains.
970 if len(arp_dest_macs) == len(self.port_handle):
974 def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
975 """Check if rate provided by user is above requirements. Applies only if latency is True."""
976 intf_speed = self.generator_config.intf_speed
# Bidirectional: sum both direction rates (loop lines elided);
# unidirectional: only rates[0] counts.
982 r = utils.convert_rates(l2frame_size, rate, intf_speed)
983 total_rate += int(r['rate_pps'])
986 r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
987 total_rate = int(r['rate_pps'])
988 # rate must be enough for latency stream and at least 1 pps for base stream per chain
# 'mult' presumably reflects directions (assigned on elided lines) --
# verify in the full source.
989 required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
990 result = utils.convert_rates(l2frame_size,
991 {'rate_pps': required_rate},
993 result['result'] = total_rate >= required_rate
# latency disabled: any rate is acceptable
996 return {'result': True}
998 def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
999 """Program all the streams in Trex server.
1001 l2frame_size: L2 frame size or IMIX
1002 rates: a list of 2 rates to run each direction
1003 each rate is a dict like {'rate_pps': '10kpps'}
1004 bidirectional: True if bidirectional
1005 latency: True if latency measurement is needed
1006 e2e: True if performing "end to end" connectivity check
# Refuse rates too low to carry the latency stream plus one base pps
# per chain (see __is_rate_enough).
1008 r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
1010 raise TrafficGeneratorException(
1011 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
1012 .format(pps=r['rate_pps'],
1014 load=r['rate_percent']))
# remembered for extract_stats() rate conversions
1015 self.l2_frame_size = l2frame_size
1016 # a dict of list of streams indexed by port#
1017 # in case of fixed size, has self.chain_count * 2 * 2 streams
1018 # (1 normal + 1 latency stream per direction per chain)
1019 # for IMIX, has self.chain_count * 2 * 4 streams
1020 # (3 normal + 1 latency stream per direction per chain)
1022 for port in self.port_handle:
1023 streamblock[port] = []
1024 stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
1025 if self.generator_config.ip_addrs_step == 'random' \
1026 or self.generator_config.gen_config.udp_port_step == 'random':
1027 LOG.warning("Using random step, the number of flows can be less than "
1028 "the requested number of flows due to repeatable multivariate random "
1029 "generation which can reproduce the same pattern of values")
1030 self.rates = [utils.to_rate_str(rate) for rate in rates]
# One forward + (if bidirectional) one reverse stream set per chain.
1031 for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
1032 streamblock[0].extend(self.generate_streams(self.port_handle[0],
1038 if len(self.rates) > 1:
1039 streamblock[1].extend(self.generate_streams(self.port_handle[1],
1043 latency=bidirectional and latency,
# Program the streams; VXLAN flow stats need the port-level vxlan_fs
# attribute set to the VXLAN UDP port.
1046 for port in self.port_handle:
1047 if self.config.vxlan:
1048 self.client.set_port_attr(ports=port, vxlan_fs=[4789])
1050 self.client.set_port_attr(ports=port, vxlan_fs=None)
1051 self.client.add_streams(streamblock[port], ports=port)
1052 LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
1054 def clear_streamblock(self):
1055 """Clear all streams from TRex."""
# reset() also removes all programmed streams on the ports.
1057 self.client.reset(self.port_handle)
1058 LOG.info('Cleared all existing streams')
1060 def get_stats(self, if_stats=None):
1061 """Get stats from Trex."""
# Raw TRex stats are post-processed into the nfvbench result layout.
1062 stats = self.client.get_stats()
1063 return self.extract_stats(stats, if_stats)
# NOTE(review): the def line of get_macs() is elided from this excerpt.
1066 """Return the Trex local port MAC addresses.
1068 return: a list of MAC addresses indexed by the port#
# port_info is populated by connect().
1070 return [port['src_mac'] for port in self.port_info]
1072 def get_port_speed_gbps(self):
1073 """Return the Trex local port speeds.
1075 return: a list of speed in Gbps indexed by the port#
1077 return [port['speed'] for port in self.port_info]
1079 def clear_stats(self):
1080 """Clear all stats in the traffic generator."""
# No-op when connect() has not assigned the port handles yet.
1081 if self.port_handle:
1082 self.client.clear_stats()
1084 def start_traffic(self):
1085 """Start generating traffic in all ports."""
# self.rates was prepared by create_traffic(); one rate per direction.
1086 for port, rate in zip(self.port_handle, self.rates):
1087 self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
1089 def stop_traffic(self):
1090 """Stop generating traffic."""
1091 self.client.stop(ports=self.port_handle)
1093 def start_capture(self):
1094 """Capture all packets on both ports that are unicast to us."""
1097 # Need to filter out unwanted packets so we do not end up counting
1098 # src MACs of frames that are not unicast to us
1099 src_mac_list = self.get_macs()
1100 bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
1101 # ports must be set in service in order to enable capture
1102 self.client.set_service_mode(ports=self.port_handle)
# capture_id dict (with an 'id' key) is reused by fetch/stop_capture.
1103 self.capture_id = self.client.start_capture \
1104 (rx_ports=self.port_handle, bpf_filter=bpf_filter)
1106 def fetch_capture_packets(self):
1107 """Fetch capture packets in capture mode."""
# Captured packets are drained into self.packet_list (replaced each call).
1109 self.packet_list = []
1110 self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
1111 output=self.packet_list)
1113 def stop_capture(self):
1114 """Stop capturing packets."""
1116 self.client.stop_capture(capture_id=self.capture_id['id'])
1117 self.capture_id = None
1118 # A traffic capture may have been started (from a T-Rex console) at this time.
1119 # If asked so, we keep the service mode enabled here, and disable it otherwise.
1120 # | Disabling the service mode while a capture is in progress
1121 # | would cause the application to stop/crash with an error.
1122 if not self.config.service_mode:
1123 self.client.set_service_mode(ports=self.port_handle, enabled=False)
# NOTE(review): the def line of cleanup() and its try/except wrapper are
# elided from this excerpt.
1126 """Cleanup Trex driver."""
1129 self.client.reset(self.port_handle)
1130 self.client.disconnect()
1132 # TRex does not like a reset while in disconnected state
1135 def set_service_mode(self, enabled=True):
1136 """Enable/disable the 'service' mode."""
1137 self.client.set_service_mode(ports=self.port_handle, enabled=enabled)