1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 """Driver module for TRex traffic generator."""
21 from functools import reduce
23 from itertools import count
24 # pylint: disable=import-error
25 from scapy.contrib.mpls import MPLS # flake8: noqa
26 # pylint: enable=import-error
27 from nfvbench.log import LOG
28 from nfvbench.traffic_server import TRexTrafficServer
29 from nfvbench.utils import cast_integer
30 from nfvbench.utils import timeout
31 from nfvbench.utils import TimeoutError
33 from hdrh.histogram import HdrHistogram
35 # pylint: disable=import-error
36 from trex.common.services.trex_service_arp import ServiceARP
37 from trex.stl.api import bind_layers
38 from trex.stl.api import CTRexVmInsFixHwCs
39 from trex.stl.api import Dot1Q
40 from trex.stl.api import Ether
41 from trex.stl.api import FlagsField
42 from trex.stl.api import IP
43 from trex.stl.api import Packet
44 from trex.stl.api import STLClient
45 from trex.stl.api import STLError
46 from trex.stl.api import STLFlowLatencyStats
47 from trex.stl.api import STLFlowStats
48 from trex.stl.api import STLPktBuilder
49 from trex.stl.api import STLScVmRaw
50 from trex.stl.api import STLStream
51 from trex.stl.api import STLTXCont
52 from trex.stl.api import STLVmFixChecksumHw
53 from trex.stl.api import STLVmFixIpv4
54 from trex.stl.api import STLVmFlowVar
55 from trex.stl.api import STLVmFlowVarRepeatableRandom
56 from trex.stl.api import STLVmTupleGen
57 from trex.stl.api import STLVmWrFlowVar
58 from trex.stl.api import ThreeBytesField
59 from trex.stl.api import UDP
60 from trex.stl.api import XByteField
62 # pylint: enable=import-error
64 from .traffic_base import AbstractTrafficGenerator
65 from .traffic_base import TrafficGeneratorException
66 from . import traffic_utils as utils
67 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
68 from .traffic_utils import IMIX_L2_SIZES
69 from .traffic_utils import IMIX_RATIOS
74 _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
76 fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
77 ThreeBytesField("vni", 0),
78 XByteField("reserved", 0x00)]
82 return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
84 class TRex(AbstractTrafficGenerator):
85 """TRex traffic generator driver."""
88 CHAIN_PG_ID_MASK = 0x007F
89 PORT_PG_ID_MASK = 0x0080
90 LATENCY_PG_ID_MASK = 0x0100
92 def __init__(self, traffic_client):
94 AbstractTrafficGenerator.__init__(self, traffic_client)
98 self.chain_count = self.generator_config.service_chain_count
100 self.capture_id = None
101 self.packet_list = []
102 self.l2_frame_size = 0
104 def get_version(self):
105 """Get the Trex version."""
106 return self.client.get_server_version() if self.client else ''
108 def get_pg_id(self, port, chain_id):
109 """Calculate the packet group IDs to use for a given port/stream type/chain_id.
112 chain_id: identifies to which chain the pg_id is associated (0 to 255)
113 return: pg_id, lat_pg_id
115 We use a bit mask to set up the 3 fields:
116 0x007F: chain ID (8 bits for a max of 128 chains)
120 pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
121 return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
123 def extract_stats(self, in_stats, ifstats):
124 """Extract stats from dict returned by Trex API.
126 :param in_stats: dict as returned by TRex api
128 utils.nan_replace(in_stats)
129 # LOG.debug(in_stats)
132 # port_handles should have only 2 elements: [0, 1]
133 # so (1 - ph) will be the index for the far end port
134 for ph in self.port_handle:
136 far_end_stats = in_stats[1 - ph]
139 'total_pkts': cast_integer(stats['opackets']),
140 'total_pkt_bytes': cast_integer(stats['obytes']),
141 'pkt_rate': cast_integer(stats['tx_pps']),
142 'pkt_bit_rate': cast_integer(stats['tx_bps'])
145 'total_pkts': cast_integer(stats['ipackets']),
146 'total_pkt_bytes': cast_integer(stats['ibytes']),
147 'pkt_rate': cast_integer(stats['rx_pps']),
148 'pkt_bit_rate': cast_integer(stats['rx_bps']),
149 # how many pkts were dropped in RX direction
150 # need to take the tx counter on the far end port
151 'dropped_pkts': cast_integer(
152 far_end_stats['opackets'] - stats['ipackets'])
155 self.__combine_latencies(in_stats, result[ph]['rx'], ph)
157 total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
158 result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
159 # actual offered tx rate in bps
160 avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
161 total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
162 result['offered_tx_rate_bps'] = total_tx_bps
164 result.update(self.get_theoretical_rates(avg_packet_size))
166 result["flow_stats"] = in_stats["flow_stats"]
167 result["latency"] = in_stats["latency"]
169 # Merge HDRHistogram to have an overall value for all chains and ports
173 for chain_id, _ in enumerate(ifstats):
174 for ph in self.port_handle:
175 _, lat_pg_id = self.get_pg_id(ph, chain_id)
177 HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
179 for pg_id in in_stats['latency']:
180 if pg_id != 'global':
182 HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
187 decoded_hdrh = reduce(add_hdrh, hdrh_list)
188 result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
194 def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
195 """Extract the aggregated stats for a given chain.
197 trex_stats: stats as returned by get_stats()
198 if_stats: a list of 2 interface stats to update (port 0 and 1)
199 latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
200 latencies[p] is the latency for packets sent on port p
201 if there are no latency streams, the Latency instances are not modified
202 chain_idx: chain index of the interface stats
204 The packet counts include normal and latency streams.
206 Trex returns flows stats as follows:
208 'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
209 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
210 'rx_bytes': {0: nan, 1: nan, 'total': nan},
211 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
212 'rx_pps': {0: 0, 1: 0, 'total': 0},
213 'tx_bps': {0: 0, 1: 0, 'total': 0},
214 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
215 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
216 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
217 'tx_pps': {0: 0, 1: 0, 'total': 0}},
218 1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
219 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
220 'rx_bytes': {0: nan, 1: nan, 'total': nan},
221 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
222 'rx_pps': {0: 0, 1: 0, 'total': 0},
223 'tx_bps': {0: 0, 1: 0, 'total': 0},
224 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
225 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
226 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
227 'tx_pps': {0: 0, 1: 0, 'total': 0}},
228 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
229 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
230 'rx_bytes': {0: nan, 1: nan, 'total': nan},
231 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
232 'rx_pps': {0: 0, 1: 0, 'total': 0},
233 'tx_bps': {0: 0, 1: 0, 'total': 0},
234 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
235 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
236 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
237 'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...
239 the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
241 packet counters for a given stream sent on port p are reported as:
242 - tx_pkts[p] on port p
243 - rx_pkts[1-p] on the far end port
245 This is a tricky/critical counter transposition operation because
246 the results are grouped by port (not by stream):
247 tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
248 rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
249 tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
250 rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
252 or using a more generic formula:
253 tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
254 rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
256 the second formula is equivalent to
257 rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
259 If there are latency streams, those same counters need to be added in the same way
261 def get_latency(lval):
263 return int(round(lval))
269 for port in range(2):
270 pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
271 for pid in [pg_id, lat_pg_id]:
273 pg_stats = trex_stats['flow_stats'][pid]
274 if_stats[port].tx += pg_stats['tx_pkts'][port]
275 if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
279 lat = trex_stats['latency'][lat_pg_id]['latency']
280 # dropped_pkts += lat['err_cntrs']['dropped']
281 latencies[port].max_usec = get_latency(lat['total_max'])
282 if math.isnan(lat['total_min']):
283 latencies[port].min_usec = 0
284 latencies[port].avg_usec = 0
286 latencies[port].min_usec = get_latency(lat['total_min'])
287 latencies[port].avg_usec = get_latency(lat['average'])
288 # pick up the HDR histogram if present (otherwise will raise KeyError)
289 latencies[port].hdrh = lat['hdrh']
293 def __combine_latencies(self, in_stats, results, port_handle):
294 """Traverse TRex result dictionary and combines chosen latency stats.
296 example of latency dict returned by trex (2 chains):
297 'latency': {256: {'err_cntrs': {'dropped': 0,
302 'latency': {'average': 26.5,
303 'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
304 'histogram': {20: 303,
314 257: {'err_cntrs': {'dropped': 0,
319 'latency': {'average': 29.75,
320 'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
321 'histogram': {20: 261,
330 384: {'err_cntrs': {'dropped': 0,
335 'latency': {'average': 18.0,
336 'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
337 'histogram': {20: 987, 30: 14},
342 385: {'err_cntrs': {'dropped': 0,
347 'latency': {'average': 19.0,
348 'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
349 'histogram': {20: 989, 30: 11},
354 'global': {'bad_hdr': 0, 'old_flow': 0}},
358 total_min = float("inf")
359 for chain_id in range(self.chain_count):
361 _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
362 lat = in_stats['latency'][lat_pg_id]['latency']
363 # dropped_pkts += lat['err_cntrs']['dropped']
364 total_max = max(lat['total_max'], total_max)
365 total_min = min(lat['total_min'], total_min)
366 average += lat['average']
369 if total_min == float("inf"):
371 results['min_delay_usec'] = total_min
372 results['max_delay_usec'] = total_max
373 results['avg_delay_usec'] = int(average / self.chain_count)
    def _bind_vxlan(self):
        """Register scapy layer bindings for VXLAN encapsulation.

        Outer UDP frames with destination port 4789 (the IANA-assigned VXLAN
        port) are dissected as VXLAN, and the VXLAN payload is dissected as an
        inner Ethernet frame.
        """
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)
379 def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
380 """Create a packet of given size.
382 l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
384 # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
385 frame_size = int(l2frame_size) - 4
387 if stream_cfg['vxlan'] is True:
390 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
391 if stream_cfg['vtep_vlan'] is not None:
392 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
393 pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
394 pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
395 pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
396 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
397 # need to randomize the outer header UDP src port based on flow
398 vxlan_udp_src_fv = STLVmFlowVar(
399 name="vxlan_udp_src",
404 vm_param = [vxlan_udp_src_fv,
405 STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
406 elif stream_cfg['mpls'] is True:
408 pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
409 if stream_cfg['vtep_vlan'] is not None:
410 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
411 if stream_cfg['mpls_outer_label'] is not None:
412 pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
413 if stream_cfg['mpls_inner_label'] is not None:
414 pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
415 # Flow stats and MPLS labels randomization TBD
416 pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
419 pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
421 if stream_cfg['vlan_tag'] is not None:
422 pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
425 if stream_cfg['udp_src_port']:
426 udp_args['sport'] = int(stream_cfg['udp_src_port'])
427 if stream_cfg['udp_port_step'] == 'random':
430 step = stream_cfg['udp_port_step']
431 udp_args['sport_step'] = int(step)
432 udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
433 if stream_cfg['udp_dst_port']:
434 udp_args['dport'] = int(stream_cfg['udp_dst_port'])
435 if stream_cfg['udp_port_step'] == 'random':
438 step = stream_cfg['udp_port_step']
439 udp_args['dport_step'] = int(step)
440 udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
442 pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
443 UDP(dport=udp_args['dport'], sport=udp_args['sport'])
445 # STLVmTupleGen need flow count >= cores used by TRex, if FC < cores we used STLVmFlowVar
446 if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
447 stream_cfg['count'] >= self.generator_config.cores:
448 src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
449 ip_max=stream_cfg['ip_src_addr_max'],
450 port_min=udp_args['sport'],
451 port_max=udp_args['sport_max'],
453 limit_flows=stream_cfg['count'])
454 dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
455 ip_max=stream_cfg['ip_dst_addr_max'],
456 port_min=udp_args['dport'],
457 port_max=udp_args['dport_max'],
459 limit_flows=stream_cfg['count'])
462 STLVmWrFlowVar(fv_name="tuple_src.ip",
463 pkt_offset="IP:{}.src".format(encap_level)),
464 STLVmWrFlowVar(fv_name="tuple_src.port",
465 pkt_offset="UDP:{}.sport".format(encap_level)),
467 STLVmWrFlowVar(fv_name="tuple_dst.ip",
468 pkt_offset="IP:{}.dst".format(encap_level)),
469 STLVmWrFlowVar(fv_name="tuple_dst.port",
470 pkt_offset="UDP:{}.dport".format(encap_level)),
473 if disable_random_latency_flow:
474 src_fv_ip = STLVmFlowVar(
476 min_value=stream_cfg['ip_src_addr'],
477 max_value=stream_cfg['ip_src_addr'],
479 dst_fv_ip = STLVmFlowVar(
481 min_value=stream_cfg['ip_dst_addr'],
482 max_value=stream_cfg['ip_dst_addr'],
484 elif stream_cfg['ip_addrs_step'] == 'random':
485 src_fv_ip = STLVmFlowVarRepeatableRandom(
487 min_value=stream_cfg['ip_src_addr'],
488 max_value=stream_cfg['ip_src_addr_max'],
490 seed=random.randint(0, 32767),
491 limit=stream_cfg['ip_src_count'])
492 dst_fv_ip = STLVmFlowVarRepeatableRandom(
494 min_value=stream_cfg['ip_dst_addr'],
495 max_value=stream_cfg['ip_dst_addr_max'],
497 seed=random.randint(0, 32767),
498 limit=stream_cfg['ip_dst_count'])
500 src_fv_ip = STLVmFlowVar(
502 min_value=stream_cfg['ip_src_addr'],
503 max_value=stream_cfg['ip_src_addr_max'],
506 step=stream_cfg['ip_addrs_step'])
507 dst_fv_ip = STLVmFlowVar(
509 min_value=stream_cfg['ip_dst_addr'],
510 max_value=stream_cfg['ip_dst_addr_max'],
513 step=stream_cfg['ip_addrs_step'])
515 if disable_random_latency_flow:
516 src_fv_port = STLVmFlowVar(
518 min_value=udp_args['sport'],
519 max_value=udp_args['sport'],
521 dst_fv_port = STLVmFlowVar(
523 min_value=udp_args['dport'],
524 max_value=udp_args['dport'],
526 elif stream_cfg['udp_port_step'] == 'random':
527 src_fv_port = STLVmFlowVarRepeatableRandom(
529 min_value=udp_args['sport'],
530 max_value=udp_args['sport_max'],
532 seed=random.randint(0, 32767),
533 limit=stream_cfg['udp_src_count'])
534 dst_fv_port = STLVmFlowVarRepeatableRandom(
536 min_value=udp_args['dport'],
537 max_value=udp_args['dport_max'],
539 seed=random.randint(0, 32767),
540 limit=stream_cfg['udp_dst_count'])
542 src_fv_port = STLVmFlowVar(
544 min_value=udp_args['sport'],
545 max_value=udp_args['sport_max'],
548 step=udp_args['sport_step'])
549 dst_fv_port = STLVmFlowVar(
551 min_value=udp_args['dport'],
552 max_value=udp_args['dport_max'],
555 step=udp_args['dport_step'])
558 STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
560 STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
562 STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
564 STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
566         # Use HW Offload to calculate the outer IP/UDP packet
567 vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
569 l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
570 # Use software to fix the inner IP/UDP payload for VxLAN packets
572 vm_param.append(STLVmFixIpv4(offset="IP:1"))
573 pad = max(0, frame_size - len(pkt_base)) * 'x'
575 return STLPktBuilder(pkt=pkt_base / pad,
576 vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
578 def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
580 """Create a list of streams corresponding to a given chain and stream config.
582 port: port where the streams originate (0 or 1)
583 chain_id: the chain to which the streams are associated to
584 stream_cfg: stream configuration
585 l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
586 latency: if True also create a latency stream
587 e2e: True if performing "end to end" connectivity check
590 pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
591 if self.config.no_flow_stats:
592 LOG.info("Traffic flow statistics are disabled.")
593 if l2frame == 'IMIX':
594 for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
595 pkt = self._create_pkt(stream_cfg, l2_frame_size)
596 if e2e or stream_cfg['mpls']:
597 streams.append(STLStream(packet=pkt,
598 mode=STLTXCont(pps=ratio)))
600 if stream_cfg['vxlan'] is True:
601 streams.append(STLStream(packet=pkt,
602 flow_stats=STLFlowStats(pg_id=pg_id,
604 if not self.config.no_flow_stats else None,
605 mode=STLTXCont(pps=ratio)))
607 streams.append(STLStream(packet=pkt,
608 flow_stats=STLFlowStats(pg_id=pg_id)
609 if not self.config.no_flow_stats else None,
610 mode=STLTXCont(pps=ratio)))
613 # for IMIX, the latency packets have the average IMIX packet size
614 if stream_cfg['ip_addrs_step'] == 'random' or \
615 stream_cfg['udp_port_step'] == 'random':
616 # Force latency flow to only one flow to avoid creating flows
617 # over requested flow count
618 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
620 pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
623 l2frame_size = int(l2frame)
624 pkt = self._create_pkt(stream_cfg, l2frame_size)
625 if e2e or stream_cfg['mpls']:
626 streams.append(STLStream(packet=pkt,
627 # Flow stats is disabled for MPLS now
628 # flow_stats=STLFlowStats(pg_id=pg_id),
631 if stream_cfg['vxlan'] is True:
632 streams.append(STLStream(packet=pkt,
633 flow_stats=STLFlowStats(pg_id=pg_id,
635 if not self.config.no_flow_stats else None,
638 streams.append(STLStream(packet=pkt,
639 flow_stats=STLFlowStats(pg_id=pg_id)
640 if not self.config.no_flow_stats else None,
642 # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
643 # without vlan, the min l2 frame size is 64
645 # This only applies to the latency stream
647 if stream_cfg['vlan_tag'] and l2frame_size < 68:
649 if stream_cfg['ip_addrs_step'] == 'random' or \
650 stream_cfg['udp_port_step'] == 'random':
651 # Force latency flow to only one flow to avoid creating flows
652 # over requested flow count
653 pkt = self._create_pkt(stream_cfg, l2frame_size, True)
655 pkt = self._create_pkt(stream_cfg, l2frame_size)
658 if self.config.no_latency_stats:
659 LOG.info("Latency flow statistics are disabled.")
660 if stream_cfg['vxlan'] is True:
661 streams.append(STLStream(packet=pkt,
662 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
664 if not self.config.no_latency_stats else None,
665 mode=STLTXCont(pps=self.LATENCY_PPS)))
667 streams.append(STLStream(packet=pkt,
668 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
669 if not self.config.no_latency_stats else None,
670 mode=STLTXCont(pps=self.LATENCY_PPS)))
674 def __connect(self, client):
677 def __connect_after_start(self):
678 # after start, Trex may take a bit of time to initialize
679 # so we need to retry a few times
680 for it in range(self.config.generic_retry_count):
683 self.client.connect()
685 except Exception as ex:
686 if it == (self.config.generic_retry_count - 1):
688 LOG.info("Retrying connection to TRex (%s)...", ex.msg)
691 """Connect to the TRex server."""
692 server_ip = self.generator_config.ip
693 LOG.info("Connecting to TRex (%s)...", server_ip)
695 # Connect to TRex server
696 self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
697 async_port=self.generator_config.zmq_pub_port)
699 self.__connect(self.client)
700 if server_ip == '127.0.0.1':
701 config_updated = self.__check_config()
702 if config_updated or self.config.restart:
704 except (TimeoutError, STLError) as e:
705 if server_ip == '127.0.0.1':
706 self.__start_local_server()
708 raise TrafficGeneratorException(e.message) from e
710 ports = list(self.generator_config.ports)
711 self.port_handle = ports
713 self.client.reset(ports)
714 # Read HW information from each port
715 # this returns an array of dict (1 per port)
717 Example of output for Intel XL710
718 [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
719 'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
720 u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
721 u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
722 u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
723 'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
724 u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
725 'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
726 'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
727 'layer_mode': 'Ethernet', u'numa': 0}, ...]
729 self.port_info = self.client.get_port_info(ports)
730 LOG.info('Connected to TRex')
731 for id, port in enumerate(self.port_info):
732 LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
733 id, port['description'], port['speed'], port['src_mac'],
734 port['pci_addr'], port['driver'])
735 # Make sure the 2 ports have the same speed
736 if self.port_info[0]['speed'] != self.port_info[1]['speed']:
737 raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
738 (self.port_info[0]['speed'],
739 self.port_info[1]['speed']))
741 def __start_local_server(self):
743 LOG.info("Starting TRex ...")
744 self.__start_server()
745 self.__connect_after_start()
746 except (TimeoutError, STLError) as e:
747 LOG.error('Cannot connect to TRex')
748 LOG.error(traceback.format_exc())
749 logpath = '/tmp/trex.log'
750 if os.path.isfile(logpath):
751 # Wait for TRex to finish writing error message
753 for _ in range(self.config.generic_retry_count):
754 size = os.path.getsize(logpath)
755 if size == last_size:
756 # probably not writing anymore
760 with open(logpath, 'r') as f:
764 raise TrafficGeneratorException(message) from e
    def __start_server(self):
        """Launch a local TRex server process using the current generator config."""
        server = TRexTrafficServer()
        server.run_server(self.generator_config)
    def __check_config(self):
        """Check whether the local TRex server configuration has changed.

        return: result of TRexTrafficServer.check_config_updated(); truthy when
                the generated config differs from the running one (callers use
                this to decide whether the local server must be restarted)
        """
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)
775 LOG.info("Restarting TRex ...")
777 # Wait for server stopped
778 for _ in range(self.config.generic_retry_count):
780 if not self.client.is_connected():
781 LOG.info("TRex is stopped...")
783 self.__start_local_server()
785 def __stop_server(self):
786 if self.generator_config.ip == '127.0.0.1':
787 ports = self.client.get_acquired_ports()
788 LOG.info('Release ports %s and stopping TRex...', ports)
791 self.client.release(ports=ports)
792 self.client.server_shutdown()
793 except STLError as e:
794 LOG.warning('Unable to stop TRex. Error: %s', e)
796 LOG.info('Using remote TRex. Unable to stop TRex')
798 def resolve_arp(self):
799 """Resolve all configured remote IP addresses.
801 return: None if ARP failed to resolve for all IP addresses
802 else a dict of list of dest macs indexed by port#
803 the dest macs in the list are indexed by the chain id
805 self.client.set_service_mode(ports=self.port_handle)
806 LOG.info('Polling ARP until successful...')
808 for port, device in zip(self.port_handle, self.generator_config.devices):
809 # there should be 1 stream config per chain
810 stream_configs = device.get_stream_configs()
811 chain_count = len(stream_configs)
812 ctx = self.client.create_service_ctx(port=port)
813 # all dest macs on this port indexed by chain ID
814 dst_macs = [None] * chain_count
816 # the index in the list is the chain id
817 if self.config.vxlan or self.config.mpls:
820 src_ip=device.vtep_src_ip,
821 dst_ip=device.vtep_dst_ip,
822 vlan=device.vtep_vlan)
823 for cfg in stream_configs
828 src_ip=cfg['ip_src_tg_gw'],
829 dst_ip=cfg['mac_discovery_gw'],
830 # will be None if no vlan tagging
831 vlan=cfg['vlan_tag'])
832 for cfg in stream_configs
835 for attempt in range(self.config.generic_retry_count):
839 LOG.error(traceback.format_exc())
843 for chain_id, mac in enumerate(dst_macs):
845 arp_record = arps[chain_id].get_record()
846 if arp_record.dst_mac:
847 dst_macs[chain_id] = arp_record.dst_mac
849 LOG.info(' ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
852 arp_record.dst_ip, arp_record.dst_mac)
854 unresolved.append(arp_record.dst_ip)
855 if dst_macs_count == chain_count:
856 arp_dest_macs[port] = dst_macs
857 LOG.info('ARP resolved successfully for port %s', port)
861 LOG.info('Retrying ARP for: %s (retry %d/%d)',
862 unresolved, retry, self.config.generic_retry_count)
863 if retry < self.config.generic_retry_count:
864 time.sleep(self.config.generic_poll_sec)
866 LOG.error('ARP timed out for port %s (resolved %d out of %d)',
872 # A traffic capture may have been started (from a T-Rex console) at this time.
873 # If asked so, we keep the service mode enabled here, and disable it otherwise.
874 # | Disabling the service mode while a capture is in progress
875 # | would cause the application to stop/crash with an error.
876 if not self.config.service_mode:
877 self.client.set_service_mode(ports=self.port_handle, enabled=False)
878 if len(arp_dest_macs) == len(self.port_handle):
882 def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
883 """Check if rate provided by user is above requirements. Applies only if latency is True."""
884 intf_speed = self.generator_config.intf_speed
890 r = utils.convert_rates(l2frame_size, rate, intf_speed)
891 total_rate += int(r['rate_pps'])
894 r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
895 total_rate = int(r['rate_pps'])
896 # rate must be enough for latency stream and at least 1 pps for base stream per chain
897 required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
898 result = utils.convert_rates(l2frame_size,
899 {'rate_pps': required_rate},
901 result['result'] = total_rate >= required_rate
904 return {'result': True}
906 def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
907 """Program all the streams in Trex server.
909 l2frame_size: L2 frame size or IMIX
910 rates: a list of 2 rates to run each direction
911 each rate is a dict like {'rate_pps': '10kpps'}
912 bidirectional: True if bidirectional
913 latency: True if latency measurement is needed
914 e2e: True if performing "end to end" connectivity check
916 r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
918 raise TrafficGeneratorException(
919 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
920 .format(pps=r['rate_pps'],
922 load=r['rate_percent']))
923 self.l2_frame_size = l2frame_size
924 # a dict of list of streams indexed by port#
925 # in case of fixed size, has self.chain_count * 2 * 2 streams
926 # (1 normal + 1 latency stream per direction per chain)
927 # for IMIX, has self.chain_count * 2 * 4 streams
928 # (3 normal + 1 latency stream per direction per chain)
930 for port in self.port_handle:
931 streamblock[port] = []
932 stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
933 if self.generator_config.ip_addrs_step == 'random' \
934 or self.generator_config.gen_config.udp_port_step == 'random':
935 LOG.warning("Using random step, the number of flows can be less than "
936 "the requested number of flows due to repeatable multivariate random "
937 "generation which can reproduce the same pattern of values")
938 self.rates = [utils.to_rate_str(rate) for rate in rates]
939 for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
940 streamblock[0].extend(self.generate_streams(self.port_handle[0],
946 if len(self.rates) > 1:
947 streamblock[1].extend(self.generate_streams(self.port_handle[1],
951 latency=bidirectional and latency,
954 for port in self.port_handle:
955 if self.config.vxlan:
956 self.client.set_port_attr(ports=port, vxlan_fs=[4789])
958 self.client.set_port_attr(ports=port, vxlan_fs=None)
959 self.client.add_streams(streamblock[port], ports=port)
960 LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
962 def clear_streamblock(self):
963 """Clear all streams from TRex."""
965 self.client.reset(self.port_handle)
966 LOG.info('Cleared all existing streams')
968 def get_stats(self, if_stats=None):
969 """Get stats from Trex."""
970 stats = self.client.get_stats()
971 return self.extract_stats(stats, if_stats)
974 """Return the Trex local port MAC addresses.
976 return: a list of MAC addresses indexed by the port#
978 return [port['src_mac'] for port in self.port_info]
    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speed in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]
    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        self.client.clear_stats()
    def start_traffic(self):
        """Start generating traffic in all ports."""
        # self.rates is indexed like self.port_handle, so each port gets its own
        # rate; force=True starts the ports even if they are not idle.
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
    def stop_traffic(self):
        """Stop generating traffic on all ports."""
        self.client.stop(ports=self.port_handle)
    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        # start_capture returns a dict; its 'id' key is used later by
        # fetch_capture_packets() and stop_capture()
        self.capture_id = self.client.start_capture \
            (rx_ports=self.port_handle, bpf_filter=bpf_filter)
    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode.

        Captured packets are appended to self.packet_list, which is reset on
        every call; requires a capture started via start_capture().
        """
        self.packet_list = []
        self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                          output=self.packet_list)
    def stop_capture(self):
        """Stop capturing packets and clear the current capture id."""
        self.client.stop_capture(capture_id=self.capture_id['id'])
        self.capture_id = None
        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        # | Disabling the service mode while a capture is in progress
        # | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
1034 """Cleanup Trex driver."""
1037 self.client.reset(self.port_handle)
1038 self.client.disconnect()
1040 # TRex does not like a reset while in disconnected state
    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service' mode.

        enabled: True to put all ports in service mode, False to take them out
        """
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)