1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 """Driver module for TRex traffic generator."""
22 from functools import reduce
24 from itertools import count
25 # pylint: disable=import-error
26 from scapy.contrib.mpls import MPLS # flake8: noqa
27 # pylint: enable=import-error
28 from nfvbench.log import LOG
29 from nfvbench.traffic_server import TRexTrafficServer
30 from nfvbench.utils import cast_integer
31 from nfvbench.utils import timeout
32 from nfvbench.utils import TimeoutError
34 from hdrh.histogram import HdrHistogram
36 # pylint: disable=import-error
37 from trex.common.services.trex_service_arp import ServiceARP
38 from trex.stl.api import bind_layers
39 from trex.stl.api import CTRexVmInsFixHwCs
40 from trex.stl.api import Dot1Q
41 from trex.stl.api import Ether
42 from trex.stl.api import FlagsField
43 from trex.stl.api import IP
44 from trex.stl.api import Packet
45 from trex.stl.api import STLClient
46 from trex.stl.api import STLError
47 from trex.stl.api import STLFlowLatencyStats
48 from trex.stl.api import STLFlowStats
49 from trex.stl.api import STLPktBuilder
50 from trex.stl.api import STLScVmRaw
51 from trex.stl.api import STLStream
52 from trex.stl.api import STLTXCont
53 from trex.stl.api import STLVmFixChecksumHw
54 from trex.stl.api import STLVmFixIpv4
55 from trex.stl.api import STLVmFlowVar
56 from trex.stl.api import STLVmFlowVarRepeatableRandom
57 from trex.stl.api import STLVmTupleGen
58 from trex.stl.api import STLVmWrFlowVar
59 from trex.stl.api import ThreeBytesField
60 from trex.stl.api import UDP
61 from trex.stl.api import XByteField
63 # pylint: enable=import-error
65 from .traffic_base import AbstractTrafficGenerator
66 from .traffic_base import TrafficGeneratorException
67 from . import traffic_utils as utils
68 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
69 from .traffic_utils import IMIX_L2_SIZES
70 from .traffic_utils import IMIX_RATIOS
75 _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
77 fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
78 ThreeBytesField("vni", 0),
79 XByteField("reserved", 0x00)]
83 return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
85 class TRex(AbstractTrafficGenerator):
86 """TRex traffic generator driver."""
89 CHAIN_PG_ID_MASK = 0x007F
90 PORT_PG_ID_MASK = 0x0080
91 LATENCY_PG_ID_MASK = 0x0100
93 def __init__(self, traffic_client):
95 AbstractTrafficGenerator.__init__(self, traffic_client)
99 self.chain_count = self.generator_config.service_chain_count
101 self.capture_id = None
102 self.packet_list = []
103 self.l2_frame_size = 0
105 def get_version(self):
106 """Get the Trex version."""
107 return self.client.get_server_version() if self.client else ''
109 def get_pg_id(self, port, chain_id):
110 """Calculate the packet group IDs to use for a given port/stream type/chain_id.
chain_id: identifies to which chain the pg_id is associated (0 to 127)
114 return: pg_id, lat_pg_id
116 We use a bit mask to set up the 3 fields:
0x007F: chain ID (7 bits for a max of 128 chains)
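For example, port=1 and chain_id=3 gives pg_id = 0x80 | 0x3 = 131
and lat_pg_id = 131 | 0x100 = 387.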
121 pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
122 return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
124 def extract_stats(self, in_stats, ifstats):
125 """Extract stats from dict returned by Trex API.
127 :param in_stats: dict as returned by TRex api
129 utils.nan_replace(in_stats)
130 # LOG.debug(in_stats)
# port_handle should have only 2 elements: [0, 1]
134 # so (1 - ph) will be the index for the far end port
135 for ph in self.port_handle:
137 far_end_stats = in_stats[1 - ph]
140 'total_pkts': cast_integer(stats['opackets']),
141 'total_pkt_bytes': cast_integer(stats['obytes']),
142 'pkt_rate': cast_integer(stats['tx_pps']),
143 'pkt_bit_rate': cast_integer(stats['tx_bps'])
146 'total_pkts': cast_integer(stats['ipackets']),
147 'total_pkt_bytes': cast_integer(stats['ibytes']),
148 'pkt_rate': cast_integer(stats['rx_pps']),
149 'pkt_bit_rate': cast_integer(stats['rx_bps']),
150 # how many pkts were dropped in RX direction
151 # need to take the tx counter on the far end port
152 'dropped_pkts': cast_integer(
153 far_end_stats['opackets'] - stats['ipackets'])
156 self.__combine_latencies(in_stats, result[ph]['rx'], ph)
158 total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
159 result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
160 # actual offered tx rate in bps
161 avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
162 total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
163 result['offered_tx_rate_bps'] = total_tx_bps
165 result.update(self.get_theoretical_rates(avg_packet_size))
167 result["flow_stats"] = in_stats["flow_stats"]
168 result["latency"] = in_stats["latency"]
# Merge HdrHistograms to get an overall value for all chains and ports
174 for chain_id, _ in enumerate(ifstats):
175 for ph in self.port_handle:
176 _, lat_pg_id = self.get_pg_id(ph, chain_id)
178 HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
180 for pg_id in in_stats['latency']:
181 if pg_id != 'global':
183 HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
188 decoded_hdrh = reduce(add_hdrh, hdrh_list)
189 result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
195 def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
196 """Extract the aggregated stats for a given chain.
198 trex_stats: stats as returned by get_stats()
199 if_stats: a list of 2 interface stats to update (port 0 and 1)
200 latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
201 latencies[p] is the latency for packets sent on port p
202 if there are no latency streams, the Latency instances are not modified
203 chain_idx: chain index of the interface stats
205 The packet counts include normal and latency streams.
TRex returns flow stats as follows:
209 'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
210 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
211 'rx_bytes': {0: nan, 1: nan, 'total': nan},
212 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
213 'rx_pps': {0: 0, 1: 0, 'total': 0},
214 'tx_bps': {0: 0, 1: 0, 'total': 0},
215 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
216 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
217 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
218 'tx_pps': {0: 0, 1: 0, 'total': 0}},
219 1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
220 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
221 'rx_bytes': {0: nan, 1: nan, 'total': nan},
222 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
223 'rx_pps': {0: 0, 1: 0, 'total': 0},
224 'tx_bps': {0: 0, 1: 0, 'total': 0},
225 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
226 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
227 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
228 'tx_pps': {0: 0, 1: 0, 'total': 0}},
229 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
230 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
231 'rx_bytes': {0: nan, 1: nan, 'total': nan},
232 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
233 'rx_pps': {0: 0, 1: 0, 'total': 0},
234 'tx_bps': {0: 0, 1: 0, 'total': 0},
235 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
236 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
237 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
238 'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...
the pg_id (0, 1, 128, ...) is the key of the dict and is obtained using the get_pg_id() method.
242 packet counters for a given stream sent on port p are reported as:
243 - tx_pkts[p] on port p
244 - rx_pkts[1-p] on the far end port
246 This is a tricky/critical counter transposition operation because
247 the results are grouped by port (not by stream):
248 tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
249 rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
250 tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
251 rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
253 or using a more generic formula:
254 tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
255 rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
257 the second formula is equivalent to
258 rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
260 If there are latency streams, those same counters need to be added in the same way
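For example, with chain_idx=0 the stream pg_ids are 0 (port 0) and 128 (port 1):
tx on port 0 comes from flow_stats[0]['tx_pkts'][0] and
rx on port 0 comes from flow_stats[128]['rx_pkts'][0].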
262 def get_latency(lval):
264 return int(round(lval))
270 for port in range(2):
271 pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
272 for pid in [pg_id, lat_pg_id]:
274 pg_stats = trex_stats['flow_stats'][pid]
275 if_stats[port].tx += pg_stats['tx_pkts'][port]
276 if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
280 lat = trex_stats['latency'][lat_pg_id]['latency']
281 # dropped_pkts += lat['err_cntrs']['dropped']
282 latencies[port].max_usec = get_latency(lat['total_max'])
283 if math.isnan(lat['total_min']):
284 latencies[port].min_usec = 0
285 latencies[port].avg_usec = 0
287 latencies[port].min_usec = get_latency(lat['total_min'])
288 latencies[port].avg_usec = get_latency(lat['average'])
289 # pick up the HDR histogram if present (otherwise will raise KeyError)
290 latencies[port].hdrh = lat['hdrh']
294 def __combine_latencies(self, in_stats, results, port_handle):
295 """Traverse TRex result dictionary and combines chosen latency stats.
297 example of latency dict returned by trex (2 chains):
298 'latency': {256: {'err_cntrs': {'dropped': 0,
303 'latency': {'average': 26.5,
304 'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
305 'histogram': {20: 303,
315 257: {'err_cntrs': {'dropped': 0,
320 'latency': {'average': 29.75,
321 'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
322 'histogram': {20: 261,
331 384: {'err_cntrs': {'dropped': 0,
336 'latency': {'average': 18.0,
337 'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
338 'histogram': {20: 987, 30: 14},
343 385: {'err_cntrs': {'dropped': 0,
348 'latency': {'average': 19.0,
349 'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
350 'histogram': {20: 989, 30: 11},
355 'global': {'bad_hdr': 0, 'old_flow': 0}},
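The keys are the latency pg_ids returned by get_pg_id(): 256/257 are chains 0/1
on port 0 (0x100 | chain_id) and 384/385 are chains 0/1 on port 1
(0x100 | 0x80 | chain_id).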
359 total_min = float("inf")
360 for chain_id in range(self.chain_count):
362 _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
363 lat = in_stats['latency'][lat_pg_id]['latency']
364 # dropped_pkts += lat['err_cntrs']['dropped']
365 total_max = max(lat['total_max'], total_max)
366 total_min = min(lat['total_min'], total_min)
367 average += lat['average']
370 if total_min == float("inf"):
372 results['min_delay_usec'] = total_min
373 results['max_delay_usec'] = total_max
374 results['avg_delay_usec'] = int(average / self.chain_count)
376 def _bind_vxlan(self):
377 bind_layers(UDP, VXLAN, dport=4789)
378 bind_layers(VXLAN, Ether)
380 def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
381 """Create a packet of given size.
383 l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
385 # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
386 frame_size = int(l2frame_size) - 4
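# e.g. a 64 byte L2 frame (including FCS) is built here as a 60 byte scapy packet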
388 if stream_cfg['vxlan'] is True:
391 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
392 if stream_cfg['vtep_vlan'] is not None:
393 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
394 pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
395 pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
396 pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
397 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
398 # need to randomize the outer header UDP src port based on flow
399 vxlan_udp_src_fv = STLVmFlowVar(
400 name="vxlan_udp_src",
405 vm_param = [vxlan_udp_src_fv,
406 STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
407 elif stream_cfg['mpls'] is True:
409 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
410 if stream_cfg['vtep_vlan'] is not None:
411 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
412 if stream_cfg['mpls_outer_label'] is not None:
413 pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
414 if stream_cfg['mpls_inner_label'] is not None:
415 pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
416 # Flow stats and MPLS labels randomization TBD
417 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
420 pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
422 if stream_cfg['vlan_tag'] is not None:
423 pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
426 if stream_cfg['udp_src_port']:
427 udp_args['sport'] = int(stream_cfg['udp_src_port'])
428 if stream_cfg['udp_port_step'] == 'random':
431 step = stream_cfg['udp_port_step']
432 udp_args['sport_step'] = int(step)
433 udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
434 if stream_cfg['udp_dst_port']:
435 udp_args['dport'] = int(stream_cfg['udp_dst_port'])
436 if stream_cfg['udp_port_step'] == 'random':
439 step = stream_cfg['udp_port_step']
440 udp_args['dport_step'] = int(step)
441 udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
443 pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
444 UDP(dport=udp_args['dport'], sport=udp_args['sport'])
# STLVmTupleGen needs flow count >= cores used by TRex; if flow count < cores we use STLVmFlowVar
447 if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
448 stream_cfg['count'] >= self.generator_config.cores:
449 src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
450 ip_max=stream_cfg['ip_src_addr_max'],
451 port_min=udp_args['sport'],
452 port_max=udp_args['sport_max'],
454 limit_flows=stream_cfg['count'])
455 dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
456 ip_max=stream_cfg['ip_dst_addr_max'],
457 port_min=udp_args['dport'],
458 port_max=udp_args['dport_max'],
460 limit_flows=stream_cfg['count'])
463 STLVmWrFlowVar(fv_name="tuple_src.ip",
464 pkt_offset="IP:{}.src".format(encap_level)),
465 STLVmWrFlowVar(fv_name="tuple_src.port",
466 pkt_offset="UDP:{}.sport".format(encap_level)),
468 STLVmWrFlowVar(fv_name="tuple_dst.ip",
469 pkt_offset="IP:{}.dst".format(encap_level)),
470 STLVmWrFlowVar(fv_name="tuple_dst.port",
471 pkt_offset="UDP:{}.dport".format(encap_level)),
474 if disable_random_latency_flow:
475 src_fv_ip = STLVmFlowVar(
477 min_value=stream_cfg['ip_src_addr'],
478 max_value=stream_cfg['ip_src_addr'],
480 dst_fv_ip = STLVmFlowVar(
482 min_value=stream_cfg['ip_dst_addr'],
483 max_value=stream_cfg['ip_dst_addr'],
485 elif stream_cfg['ip_addrs_step'] == 'random':
486 src_fv_ip = STLVmFlowVarRepeatableRandom(
488 min_value=stream_cfg['ip_src_addr'],
489 max_value=stream_cfg['ip_src_addr_max'],
491 seed=random.randint(0, 32767),
492 limit=stream_cfg['ip_src_count'])
493 dst_fv_ip = STLVmFlowVarRepeatableRandom(
495 min_value=stream_cfg['ip_dst_addr'],
496 max_value=stream_cfg['ip_dst_addr_max'],
498 seed=random.randint(0, 32767),
499 limit=stream_cfg['ip_dst_count'])
501 src_fv_ip = STLVmFlowVar(
503 min_value=stream_cfg['ip_src_addr'],
504 max_value=stream_cfg['ip_src_addr_max'],
507 step=stream_cfg['ip_addrs_step'])
508 dst_fv_ip = STLVmFlowVar(
510 min_value=stream_cfg['ip_dst_addr'],
511 max_value=stream_cfg['ip_dst_addr_max'],
514 step=stream_cfg['ip_addrs_step'])
516 if disable_random_latency_flow:
517 src_fv_port = STLVmFlowVar(
519 min_value=udp_args['sport'],
520 max_value=udp_args['sport'],
522 dst_fv_port = STLVmFlowVar(
524 min_value=udp_args['dport'],
525 max_value=udp_args['dport'],
527 elif stream_cfg['udp_port_step'] == 'random':
528 src_fv_port = STLVmFlowVarRepeatableRandom(
530 min_value=udp_args['sport'],
531 max_value=udp_args['sport_max'],
533 seed=random.randint(0, 32767),
534 limit=stream_cfg['udp_src_count'])
535 dst_fv_port = STLVmFlowVarRepeatableRandom(
537 min_value=udp_args['dport'],
538 max_value=udp_args['dport_max'],
540 seed=random.randint(0, 32767),
541 limit=stream_cfg['udp_dst_count'])
543 src_fv_port = STLVmFlowVar(
545 min_value=udp_args['sport'],
546 max_value=udp_args['sport_max'],
549 step=udp_args['sport_step'])
550 dst_fv_port = STLVmFlowVar(
552 min_value=udp_args['dport'],
553 max_value=udp_args['dport_max'],
556 step=udp_args['dport_step'])
559 STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
561 STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
563 STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
565 STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
# Use HW offload to calculate the outer IP/UDP checksums
568 vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
570 l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
# Use software to fix the inner IPv4 checksum for VXLAN packets
573 vm_param.append(STLVmFixIpv4(offset="IP:1"))
574 pad = max(0, frame_size - len(pkt_base)) * 'x'
576 return STLPktBuilder(pkt=pkt_base / pad,
577 vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
579 def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
581 """Create a list of streams corresponding to a given chain and stream config.
583 port: port where the streams originate (0 or 1)
chain_id: the chain to which the streams are associated
585 stream_cfg: stream configuration
586 l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
587 latency: if True also create a latency stream
588 e2e: True if performing "end to end" connectivity check
591 pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
592 if l2frame == 'IMIX':
593 for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
594 pkt = self._create_pkt(stream_cfg, l2_frame_size)
595 if e2e or stream_cfg['mpls']:
596 streams.append(STLStream(packet=pkt,
597 mode=STLTXCont(pps=ratio)))
599 if stream_cfg['vxlan'] is True:
600 streams.append(STLStream(packet=pkt,
601 flow_stats=STLFlowStats(pg_id=pg_id,
603 if not self.config.no_flow_stats else None,
604 mode=STLTXCont(pps=ratio)))
606 streams.append(STLStream(packet=pkt,
607 flow_stats=STLFlowStats(pg_id=pg_id)
608 if not self.config.no_flow_stats else None,
609 mode=STLTXCont(pps=ratio)))
612 # for IMIX, the latency packets have the average IMIX packet size
613 if stream_cfg['ip_addrs_step'] == 'random' or \
614 stream_cfg['udp_port_step'] == 'random':
# Force the latency stream to a single flow to avoid creating more
# flows than requested
617 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
619 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
622 l2frame_size = int(l2frame)
623 pkt = self._create_pkt(stream_cfg, l2frame_size)
624 if e2e or stream_cfg['mpls']:
625 streams.append(STLStream(packet=pkt,
# Flow stats are disabled for MPLS for now
627 # flow_stats=STLFlowStats(pg_id=pg_id),
630 if stream_cfg['vxlan'] is True:
631 streams.append(STLStream(packet=pkt,
632 flow_stats=STLFlowStats(pg_id=pg_id,
634 if not self.config.no_flow_stats else None,
637 streams.append(STLStream(packet=pkt,
638 flow_stats=STLFlowStats(pg_id=pg_id)
639 if not self.config.no_flow_stats else None,
641 # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
# without vlan, the min l2 frame size is 64; with vlan it is 68
644 # This only applies to the latency stream
646 if stream_cfg['vlan_tag'] and l2frame_size < 68:
648 if stream_cfg['ip_addrs_step'] == 'random' or \
649 stream_cfg['udp_port_step'] == 'random':
# Force the latency stream to a single flow to avoid creating more
# flows than requested
652 pkt = self._create_pkt(stream_cfg, l2frame_size, True)
654 pkt = self._create_pkt(stream_cfg, l2frame_size)
657 if self.config.no_latency_stats:
658 LOG.info("Latency flow statistics are disabled.")
659 if stream_cfg['vxlan'] is True:
660 streams.append(STLStream(packet=pkt,
661 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
663 if not self.config.no_latency_stats else None,
664 mode=STLTXCont(pps=self.LATENCY_PPS)))
666 streams.append(STLStream(packet=pkt,
667 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
668 if not self.config.no_latency_stats else None,
669 mode=STLTXCont(pps=self.LATENCY_PPS)))
673 def __connect(self, client):
676 def __local_server_status(self):
677 """ The TRex server may have started but failed initializing... and stopped.
678 This piece of code is especially designed to address
679 the case when a fatal failure occurs on a DPDK init call.
The TRex algorithm should be revised to include some missing timeouts (?)
683 1: fatal error detected - should lead to exiting the run
684 2: error detected that could be solved by starting again
685 The diagnostic is based on parsing the local trex log file (improvable)
697 with open('/tmp/trex.log', 'r') as trex_log:
698 for _line in trex_log:
700 if line.startswith('Usage:'):
702 if 'ports are bound' in line:
704 if 'please wait' in line:
706 if 'exit' in line.lower():
708 elif 'cause' in line.lower():
710 elif 'fail' in line.lower():
712 elif 'msg' in line.lower():
714 elif (error is not None) and line:
716 elif line.startswith('Error:') or line.startswith('ERROR'):
720 except FileNotFoundError:
722 if exited is not None:
724 LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
725 if cause is not None:
726 LOG.info("TRex [cont'd] %s", cause)
727 if failure is not None:
728 LOG.info("TRex [cont'd] %s", failure)
729 if message is not None:
730 LOG.info("TRex [cont'd] %s", message)
731 if 'not supported yet' in message.lower():
732 LOG.info("TRex [cont'd] Try starting again!")
734 elif error is not None:
736 LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
737 if after is not None:
738 LOG.info("TRex [cont'd] %s", after)
739 elif before is not None:
740 LOG.info("TRex [cont'd] %s", before)
743 def __connect_after_start(self):
744 # after start, Trex may take a bit of time to initialize
745 # so we need to retry a few times
746 # we try to capture recoverable error cases (checking status)
748 for it in range(self.config.generic_retry_count):
751 self.client.connect()
753 except Exception as ex:
754 if it == (self.config.generic_retry_count - 1):
756 status = self.__local_server_status()
758 # No need to wait anymore, something went wrong and TRex exited
760 LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
761 print("More information? Try the command: "
762 + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
765 # a new start will follow
767 LOG.info("Retrying connection to TRex (%s)...", ex.msg)
771 """Connect to the TRex server."""
773 server_ip = self.generator_config.ip
774 LOG.info("Connecting to TRex (%s)...", server_ip)
776 # Connect to TRex server
777 self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
778 async_port=self.generator_config.zmq_pub_port)
780 self.__connect(self.client)
781 if server_ip == '127.0.0.1':
782 config_updated = self.__check_config()
783 if config_updated or self.config.restart:
784 status = self.__restart()
785 except (TimeoutError, STLError) as e:
786 if server_ip == '127.0.0.1':
787 status = self.__start_local_server()
789 raise TrafficGeneratorException(e.message) from e
792 # Workaround in case of a failed TRex server initialization
793 # we try to start it again (twice maximum)
794 # which may allow low level initialization to complete.
795 if self.__start_local_server() == 2:
796 self.__start_local_server()
798 ports = list(self.generator_config.ports)
799 self.port_handle = ports
801 self.client.reset(ports)
802 # Read HW information from each port
803 # this returns an array of dict (1 per port)
805 Example of output for Intel XL710
806 [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
807 'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
808 u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
809 u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
810 u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
811 'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
812 u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
813 'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
814 'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
815 'layer_mode': 'Ethernet', u'numa': 0}, ...]
817 self.port_info = self.client.get_port_info(ports)
818 LOG.info('Connected to TRex')
819 for id, port in enumerate(self.port_info):
820 LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
821 id, port['description'], port['speed'], port['src_mac'],
822 port['pci_addr'], port['driver'])
823 # Make sure the 2 ports have the same speed
824 if self.port_info[0]['speed'] != self.port_info[1]['speed']:
825 raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
826 (self.port_info[0]['speed'],
827 self.port_info[1]['speed']))
829 def __start_local_server(self):
831 LOG.info("Starting TRex ...")
832 self.__start_server()
833 status = self.__connect_after_start()
834 except (TimeoutError, STLError) as e:
835 LOG.error('Cannot connect to TRex')
836 LOG.error(traceback.format_exc())
837 logpath = '/tmp/trex.log'
838 if os.path.isfile(logpath):
839 # Wait for TRex to finish writing error message
841 for _ in range(self.config.generic_retry_count):
842 size = os.path.getsize(logpath)
843 if size == last_size:
844 # probably not writing anymore
848 with open(logpath, 'r') as f:
852 raise TrafficGeneratorException(message) from e
855 def __start_server(self):
856 server = TRexTrafficServer()
857 server.run_server(self.generator_config)
859 def __check_config(self):
860 server = TRexTrafficServer()
861 return server.check_config_updated(self.generator_config)
864 LOG.info("Restarting TRex ...")
866 # Wait for server stopped
867 for _ in range(self.config.generic_retry_count):
869 if not self.client.is_connected():
870 LOG.info("TRex is stopped...")
872 # Start and report a possible failure
873 return self.__start_local_server()
875 def __stop_server(self):
876 if self.generator_config.ip == '127.0.0.1':
877 ports = self.client.get_acquired_ports()
878 LOG.info('Release ports %s and stopping TRex...', ports)
881 self.client.release(ports=ports)
882 self.client.server_shutdown()
883 except STLError as e:
884 LOG.warning('Unable to stop TRex. Error: %s', e)
886 LOG.info('Using remote TRex. Unable to stop TRex')
888 def resolve_arp(self):
889 """Resolve all configured remote IP addresses.
return: None if any IP address could not be resolved,
892 else a dict of list of dest macs indexed by port#
893 the dest macs in the list are indexed by the chain id
895 self.client.set_service_mode(ports=self.port_handle)
896 LOG.info('Polling ARP until successful...')
898 for port, device in zip(self.port_handle, self.generator_config.devices):
899 # there should be 1 stream config per chain
900 stream_configs = device.get_stream_configs()
901 chain_count = len(stream_configs)
902 ctx = self.client.create_service_ctx(port=port)
903 # all dest macs on this port indexed by chain ID
904 dst_macs = [None] * chain_count
906 # the index in the list is the chain id
907 if self.config.vxlan or self.config.mpls:
910 src_ip=device.vtep_src_ip,
911 dst_ip=device.vtep_dst_ip,
912 vlan=device.vtep_vlan)
913 for cfg in stream_configs
918 src_ip=cfg['ip_src_tg_gw'],
919 dst_ip=cfg['mac_discovery_gw'],
920 # will be None if no vlan tagging
921 vlan=cfg['vlan_tag'])
922 for cfg in stream_configs
925 for attempt in range(self.config.generic_retry_count):
929 LOG.error(traceback.format_exc())
933 for chain_id, mac in enumerate(dst_macs):
935 arp_record = arps[chain_id].get_record()
936 if arp_record.dst_mac:
937 dst_macs[chain_id] = arp_record.dst_mac
939 LOG.info(' ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
942 arp_record.dst_ip, arp_record.dst_mac)
944 unresolved.append(arp_record.dst_ip)
945 if dst_macs_count == chain_count:
946 arp_dest_macs[port] = dst_macs
947 LOG.info('ARP resolved successfully for port %s', port)
951 LOG.info('Retrying ARP for: %s (retry %d/%d)',
952 unresolved, retry, self.config.generic_retry_count)
953 if retry < self.config.generic_retry_count:
954 time.sleep(self.config.generic_poll_sec)
956 LOG.error('ARP timed out for port %s (resolved %d out of %d)',
962 # A traffic capture may have been started (from a T-Rex console) at this time.
# If so requested, we keep the service mode enabled here, and disable it otherwise.
964 # | Disabling the service mode while a capture is in progress
965 # | would cause the application to stop/crash with an error.
966 if not self.config.service_mode:
967 self.client.set_service_mode(ports=self.port_handle, enabled=False)
968 if len(arp_dest_macs) == len(self.port_handle):
972 def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
973 """Check if rate provided by user is above requirements. Applies only if latency is True."""
974 intf_speed = self.generator_config.intf_speed
980 r = utils.convert_rates(l2frame_size, rate, intf_speed)
981 total_rate += int(r['rate_pps'])
984 r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
985 total_rate = int(r['rate_pps'])
986 # rate must be enough for latency stream and at least 1 pps for base stream per chain
987 required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
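# e.g. with service_chain_count=2 and mult=2, required_rate = (LATENCY_PPS + 1) * 4 pps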
988 result = utils.convert_rates(l2frame_size,
989 {'rate_pps': required_rate},
991 result['result'] = total_rate >= required_rate
994 return {'result': True}
996 def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
997 """Program all the streams in Trex server.
999 l2frame_size: L2 frame size or IMIX
1000 rates: a list of 2 rates to run each direction
1001 each rate is a dict like {'rate_pps': '10kpps'}
1002 bidirectional: True if bidirectional
1003 latency: True if latency measurement is needed
1004 e2e: True if performing "end to end" connectivity check
1006 if self.config.no_flow_stats:
1007 LOG.info("Traffic flow statistics are disabled.")
1008 r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
1010 raise TrafficGeneratorException(
1011 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
1012 .format(pps=r['rate_pps'],
1014 load=r['rate_percent']))
1015 self.l2_frame_size = l2frame_size
1016 # a dict of list of streams indexed by port#
1017 # in case of fixed size, has self.chain_count * 2 * 2 streams
1018 # (1 normal + 1 latency stream per direction per chain)
1019 # for IMIX, has self.chain_count * 2 * 4 streams
1020 # (3 normal + 1 latency stream per direction per chain)
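# e.g. with 2 chains and a fixed frame size: 2 * 2 * 2 = 8 streams (4 per port)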
1022 for port in self.port_handle:
1023 streamblock[port] = []
1024 stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
1025 if self.generator_config.ip_addrs_step == 'random' \
1026 or self.generator_config.gen_config.udp_port_step == 'random':
1027 LOG.warning("Using random step, the number of flows can be less than "
1028 "the requested number of flows due to repeatable multivariate random "
1029 "generation which can reproduce the same pattern of values")
1030 self.rates = [utils.to_rate_str(rate) for rate in rates]
1031 for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
1032 streamblock[0].extend(self.generate_streams(self.port_handle[0],
1038 if len(self.rates) > 1:
1039 streamblock[1].extend(self.generate_streams(self.port_handle[1],
1043 latency=bidirectional and latency,
1046 for port in self.port_handle:
1047 if self.config.vxlan:
1048 self.client.set_port_attr(ports=port, vxlan_fs=[4789])
1050 self.client.set_port_attr(ports=port, vxlan_fs=None)
1051 self.client.add_streams(streamblock[port], ports=port)
1052 LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
1054 def clear_streamblock(self):
1055 """Clear all streams from TRex."""
1057 self.client.reset(self.port_handle)
1058 LOG.info('Cleared all existing streams')
1060 def get_stats(self, if_stats=None):
1061 """Get stats from Trex."""
1062 stats = self.client.get_stats()
1063 return self.extract_stats(stats, if_stats)
1066 """Return the Trex local port MAC addresses.
1068 return: a list of MAC addresses indexed by the port#
1070 return [port['src_mac'] for port in self.port_info]
1072 def get_port_speed_gbps(self):
1073 """Return the Trex local port MAC addresses.
1075 return: a list of speed in Gbps indexed by the port#
1077 return [port['speed'] for port in self.port_info]
1079 def clear_stats(self):
1080 """Clear all stats in the traffic gneerator."""
1081 if self.port_handle:
1082 self.client.clear_stats()
1084 def start_traffic(self):
1085 """Start generating traffic in all ports."""
1086 for port, rate in zip(self.port_handle, self.rates):
1087 self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
1089 def stop_traffic(self):
1090 """Stop generating traffic."""
1091 self.client.stop(ports=self.port_handle)
1093 def start_capture(self):
1094 """Capture all packets on both ports that are unicast to us."""
1097 # Need to filter out unwanted packets so we do not end up counting
1098 # src MACs of frames that are not unicast to us
1099 src_mac_list = self.get_macs()
1100 bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
# ports must be set in service mode in order to enable capture
1102 self.client.set_service_mode(ports=self.port_handle)
1103 self.capture_id = self.client.start_capture \
1104 (rx_ports=self.port_handle, bpf_filter=bpf_filter)
1106 def fetch_capture_packets(self):
1107 """Fetch capture packets in capture mode."""
1109 self.packet_list = []
1110 self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
1111 output=self.packet_list)
1113 def stop_capture(self):
1114 """Stop capturing packets."""
1116 self.client.stop_capture(capture_id=self.capture_id['id'])
1117 self.capture_id = None
1118 # A traffic capture may have been started (from a T-Rex console) at this time.
# If so requested, we keep the service mode enabled here, and disable it otherwise.
1120 # | Disabling the service mode while a capture is in progress
1121 # | would cause the application to stop/crash with an error.
1122 if not self.config.service_mode:
1123 self.client.set_service_mode(ports=self.port_handle, enabled=False)
1126 """Cleanup Trex driver."""
1129 self.client.reset(self.port_handle)
1130 self.client.disconnect()
1132 # TRex does not like a reset while in disconnected state
1135 def set_service_mode(self, enabled=True):
1136 """Enable/disable the 'service' mode."""
1137 self.client.set_service_mode(ports=self.port_handle, enabled=enabled)