1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 """Driver module for TRex traffic generator."""
21 from functools import reduce
23 from itertools import count
24 # pylint: disable=import-error
25 from scapy.contrib.mpls import MPLS # flake8: noqa
26 # pylint: enable=import-error
27 from nfvbench.log import LOG
28 from nfvbench.traffic_server import TRexTrafficServer
29 from nfvbench.utils import cast_integer
30 from nfvbench.utils import timeout
31 from nfvbench.utils import TimeoutError
33 from hdrh.histogram import HdrHistogram
35 # pylint: disable=import-error
36 from trex.common.services.trex_service_arp import ServiceARP
37 from trex.stl.api import bind_layers
38 from trex.stl.api import CTRexVmInsFixHwCs
39 from trex.stl.api import Dot1Q
40 from trex.stl.api import Ether
41 from trex.stl.api import FlagsField
42 from trex.stl.api import IP
43 from trex.stl.api import Packet
44 from trex.stl.api import STLClient
45 from trex.stl.api import STLError
46 from trex.stl.api import STLFlowLatencyStats
47 from trex.stl.api import STLFlowStats
48 from trex.stl.api import STLPktBuilder
49 from trex.stl.api import STLScVmRaw
50 from trex.stl.api import STLStream
51 from trex.stl.api import STLTXCont
52 from trex.stl.api import STLVmFixChecksumHw
53 from trex.stl.api import STLVmFixIpv4
54 from trex.stl.api import STLVmFlowVar
55 from trex.stl.api import STLVmFlowVarRepeatableRandom
56 from trex.stl.api import STLVmWrFlowVar
57 from trex.stl.api import ThreeBytesField
58 from trex.stl.api import UDP
59 from trex.stl.api import XByteField
61 # pylint: enable=import-error
63 from .traffic_base import AbstractTrafficGenerator
64 from .traffic_base import TrafficGeneratorException
65 from . import traffic_utils as utils
66 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
67 from .traffic_utils import IMIX_L2_SIZES
68 from .traffic_utils import IMIX_RATIOS
# NOTE(review): the enclosing scapy Packet subclass header (presumably
# `class VXLAN(Packet):`) is not visible in this view; the fragments below
# are its class attributes and the tail of its summary method — confirm
# against the full file.
_VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]  # 27 reserved bits, the I flag, 5 reserved bits

# VXLAN header layout: 32-bit flags word (I bit set by default), 24-bit VNI,
# one trailing reserved byte.
fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
               ThreeBytesField("vni", 0),
               XByteField("reserved", 0x00)]

# one-line packet summary used by scapy displays (method def line elided in this view)
return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    # Bit layout of a packet group id (see get_pg_id):
    CHAIN_PG_ID_MASK = 0x007F    # low bits carry the chain id
    PORT_PG_ID_MASK = 0x0080     # set for streams sent from port 1 (pg_id = port * mask | chain)
    LATENCY_PG_ID_MASK = 0x0100  # set for latency streams
    def __init__(self, traffic_client):
        """Initialize the TRex driver from the shared traffic client/config."""
        # NOTE(review): several attribute initializations appear elided in this
        # view (self.client, self.id, self.port_handle, self.rates, self.port_info
        # are read elsewhere in the class) — confirm against the full file.
        AbstractTrafficGenerator.__init__(self, traffic_client)
        # number of service chains streams must be generated for
        self.chain_count = self.generator_config.service_chain_count
        self.capture_id = None   # opaque handle returned by client.start_capture()
        self.packet_list = []    # packets collected while in capture mode
        self.l2_frame_size = 0   # L2 frame size of the last programmed traffic
103 def get_version(self):
104 """Get the Trex version."""
105 return self.client.get_server_version() if self.client else ''
107 def get_pg_id(self, port, chain_id):
108 """Calculate the packet group IDs to use for a given port/stream type/chain_id.
111 chain_id: identifies to which chain the pg_id is associated (0 to 255)
112 return: pg_id, lat_pg_id
114 We use a bit mask to set up the 3 fields:
115 0x007F: chain ID (8 bits for a max of 128 chains)
119 pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
120 return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
    def extract_stats(self, in_stats, ifstats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        :param ifstats: per-interface stats; iterated to merge per-chain HDR histograms
        """
        # NOTE(review): several statements are missing from this view — the
        # per-port result dict construction below is fragmentary (dict literal
        # keys without their enclosing literal); preserved as-is, confirm
        # against the full file.
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            far_end_stats = in_stats[1 - ph]
            # TX counters come from this port's output counters
            'total_pkts': cast_integer(stats['opackets']),
            'total_pkt_bytes': cast_integer(stats['obytes']),
            'pkt_rate': cast_integer(stats['tx_pps']),
            'pkt_bit_rate': cast_integer(stats['tx_bps'])
            # RX counters come from this port's input counters
            'total_pkts': cast_integer(stats['ipackets']),
            'total_pkt_bytes': cast_integer(stats['ibytes']),
            'pkt_rate': cast_integer(stats['rx_pps']),
            'pkt_bit_rate': cast_integer(stats['rx_bps']),
            # how many pkts were dropped in RX direction
            # need to take the tx counter on the far end port
            'dropped_pkts': cast_integer(
                far_end_stats['opackets'] - stats['ipackets'])
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        # actual offered tx rate in bps
        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
        total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
        result['offered_tx_rate_bps'] = total_tx_bps

        result.update(self.get_theoretical_rates(avg_packet_size))

        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]

        # Merge HDRHistogram to have an overall value for all chains and ports
        # NOTE(review): the branch/accumulator structure around the two loops
        # below (hdrh_list construction) appears elided — confirm.
        for chain_id, _ in enumerate(ifstats):
            for ph in self.port_handle:
                _, lat_pg_id = self.get_pg_id(ph, chain_id)
                HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
        for pg_id in in_stats['latency']:
            if pg_id != 'global':
                HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
        # fold all decoded histograms into one and re-encode it as text
        decoded_hdrh = reduce(add_hdrh, hdrh_list)
        result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flows stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                       'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'rx_bytes': {0: nan, 1: nan, 'total': nan},
                       'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                       'rx_pps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                       'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                       'tx_pps': {0: 0, 1: 0, 'total': 0}},
                   1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                       'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'rx_bytes': {0: nan, 1: nan, 'total': nan},
                       'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                       'rx_pps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps': {0: 0, 1: 0, 'total': 0},
                       'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                       'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                       'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                       'tx_pps': {0: 0, 1: 0, 'total': 0}},
                   128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                         'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                         'rx_bytes': {0: nan, 1: nan, 'total': nan},
                         'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                         'rx_pps': {0: 0, 1: 0, 'total': 0},
                         'tx_bps': {0: 0, 1: 0, 'total': 0},
                         'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                         'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                         'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                         'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
        def get_latency(lval):
            # round the float latency reported by TRex to the nearest whole usec
            return int(round(lval))

        # NOTE(review): some guard/try statements of this loop appear elided in
        # this view; preserved as-is, confirm against the full file.
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            # accumulate both the normal and latency stream counters
            for pid in [pg_id, lat_pg_id]:
                pg_stats = trex_stats['flow_stats'][pid]
                if_stats[port].tx += pg_stats['tx_pkts'][port]
                if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]

            lat = trex_stats['latency'][lat_pg_id]['latency']
            # dropped_pkts += lat['err_cntrs']['dropped']
            latencies[port].max_usec = get_latency(lat['total_max'])
            if math.isnan(lat['total_min']):
                latencies[port].min_usec = 0
                latencies[port].avg_usec = 0
                # NOTE(review): the else branch line appears elided here
                latencies[port].min_usec = get_latency(lat['total_min'])
                latencies[port].avg_usec = get_latency(lat['average'])
            # pick up the HDR histogram if present (otherwise will raise KeyError)
            latencies[port].hdrh = lat['hdrh']
    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse TRex result dictionary and combines chosen latency stats.

        in_stats: raw stats dict as returned by the TRex API
        results: dict updated in place with min/max/avg delay in usec
        port_handle: port whose latency streams are combined across chains

        example of latency dict returned by trex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 26.5,
                                      'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                      'histogram': {20: 303,
                    257: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 29.75,
                                      'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                      'histogram': {20: 261,
                    384: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 18.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                      'histogram': {20: 987, 30: 14},
                    385: {'err_cntrs': {'dropped': 0,
                          'latency': {'average': 19.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                      'histogram': {20: 989, 30: 11},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
        # NOTE(review): initialization of total_max/average and the try/except
        # around the loop body appear elided in this view — confirm.
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
            lat = in_stats['latency'][lat_pg_id]['latency']
            # dropped_pkts += lat['err_cntrs']['dropped']
            total_max = max(lat['total_max'], total_max)
            total_min = min(lat['total_min'], total_min)
            average += lat['average']
        # no chain produced a latency sample: leave min at a safe default
        if total_min == float("inf"):
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)
374 def _bind_vxlan(self):
375 bind_layers(UDP, VXLAN, dport=4789)
376 bind_layers(VXLAN, Ether)
    def _create_pkt(self, stream_cfg, l2frame_size):
        """Create a packet of given size.

        stream_cfg: stream configuration dict (addresses, encapsulation, ranges)
        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        return: presumably an STLPktBuilder for the stream — see last statement
        """
        # NOTE(review): numerous statements of this method are elided in this
        # view (flow-var names/sizes, encap_level assignment, several else
        # branches); fragments preserved as-is, confirm against the full file.
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        if stream_cfg['vxlan'] is True:
            # VxLAN encapsulation: outer Ether/[Dot1Q]/IP/UDP/VXLAN + inner Ether
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            # MPLS encapsulation: Ether/[Dot1Q]/[outer label]/[inner label] + inner Ether
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            # Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # plain (non-encapsulated) L2 frame
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        # build the UDP port arguments (fixed, stepped or random ranges)
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = stream_cfg['udp_port_step']
            udp_args['sport_step'] = int(step)
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = stream_cfg['udp_port_step']
            udp_args['dport_step'] = int(step)
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])

        # a static source IP pins the max to the base address (single flow)
        if stream_cfg['ip_src_static'] is True:
            src_max_ip_value = stream_cfg['ip_src_addr']
            src_max_ip_value = stream_cfg['ip_src_addr_max']

        if stream_cfg['ip_addrs_step'] == 'random':
            # random-but-repeatable IP ranges so runs are reproducible per seed
            src_fv_ip = STLVmFlowVarRepeatableRandom(
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_src_count'])
            dst_fv_ip = STLVmFlowVarRepeatableRandom(
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_dst_count'])
            # linear stepping through the IP ranges
            src_fv_ip = STLVmFlowVar(
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                step=stream_cfg['ip_addrs_step'])
            dst_fv_ip = STLVmFlowVar(
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                step=stream_cfg['ip_addrs_step'])

        if stream_cfg['udp_port_step'] == 'random':
            src_fv_port = STLVmFlowVarRepeatableRandom(
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_src_count'])
            dst_fv_port = STLVmFlowVarRepeatableRandom(
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_dst_count'])
            src_fv_port = STLVmFlowVar(
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                step=udp_args['sport_step'])
            dst_fv_port = STLVmFlowVar(
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                step=udp_args['dport_step'])

        # rewrite the (possibly inner) IP/UDP fields from the flow vars;
        # encap_level selects the inner header when VxLAN is used
        STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
        STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
        STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
        STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),

        # Use HW Offload to calculate the outter IP/UDP packet
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IP/UDP payload for VxLAN packets
        vm_param.append(STLVmFixIpv4(offset="IP:1"))

        # pad the payload up to the requested frame size (FCS excluded)
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated to
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
        # NOTE(review): the def continuation line (e2e=False):, the streams list
        # initialization and the return statement appear elided in this view.
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        if l2frame == 'IMIX':
            # one normal stream per IMIX size, weighted by the IMIX ratios
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    # no flow stats for e2e checks / MPLS
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            # for IMIX, the latency packets have the average IMIX packet size
            pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
            # fixed frame size case
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                             if not self.config.no_flow_stats else None,
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # (with the vlan overhead the minimum becomes 68)
            # This only applies to the latency stream
            if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
                pkt = self._create_pkt(stream_cfg, 68)

        if self.config.no_latency_stats:
            LOG.info("Latency flow statistics are disabled.")
        if stream_cfg['vxlan'] is True:
            streams.append(STLStream(packet=pkt,
                                     flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                     if not self.config.no_latency_stats else None,
                                     mode=STLTXCont(pps=self.LATENCY_PPS)))
            streams.append(STLStream(packet=pkt,
                                     flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                     if not self.config.no_latency_stats else None,
                                     mode=STLTXCont(pps=self.LATENCY_PPS)))
    def __connect(self, client):
        # NOTE(review): the body of __connect is elided in this view
        # (presumably a timeout-guarded client.connect()).

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        # NOTE(review): try/success-break statements appear elided below.
        for it in range(self.config.generic_retry_count):
                self.client.connect()
            except Exception as ex:
                # re-raise only on the last attempt
                if it == (self.config.generic_retry_count - 1):
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)
        """Connect to the TRex server."""
        # NOTE(review): the def line of connect() and several try/sleep/restart
        # statements appear elided in this view; fragments preserved as-is.
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
            self.__connect(self.client)
            # only a local server's config can be checked/restarted
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
        except (TimeoutError, STLError) as e:
            # a local server that is not running yet can be started on the fly
            if server_ip == '127.0.0.1':
                self.__start_local_server()
                raise TrafficGeneratorException(e.message) from e

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))
    def __start_local_server(self):
        """Start a local TRex server and connect to it, surfacing the TRex log on failure."""
        # NOTE(review): the try:, last_size init, sleep/break statements and the
        # log-message extraction appear elided in this view.
            LOG.info("Starting TRex ...")
            self.__start_server()
            self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                with open(logpath, 'r') as f:
            raise TrafficGeneratorException(message) from e
704 def __start_server(self):
705 server = TRexTrafficServer()
706 server.run_server(self.generator_config)
708 def __check_config(self):
709 server = TRexTrafficServer()
710 return server.check_config_updated(self.generator_config)
        # NOTE(review): the enclosing def (presumably a private restart method)
        # and the stop/sleep/break statements appear elided in this view.
        LOG.info("Restarting TRex ...")
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
        self.__start_local_server()
    def __stop_server(self):
        """Shut down a locally running TRex server (no-op warning for remote servers)."""
        # NOTE(review): the try: line and the else: for the remote case appear
        # elided in this view.
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
                # ports must be released before the server can shut down
                self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                # best-effort: log and continue instead of failing the teardown
                LOG.warning('Unable to stop TRex. Error: %s', e)
            LOG.info('Using remote TRex. Unable to stop TRex')
    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of list of dest macs indexed by port#
                     the dest macs in the list are indexed by the chain id
        """
        # NOTE(review): several statements are elided in this view
        # (arp_dest_macs init, ctx.run(arps), retry loop header, unresolved
        # list reset, return statements); fragments preserved as-is.
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                # overlay case: ARP for the VTEP endpoints
                src_ip=device.vtep_src_ip,
                dst_ip=device.vtep_dst_ip,
                vlan=device.vtep_vlan)
                for cfg in stream_configs
                # regular case: ARP for the chain gateways
                src_ip=cfg['ip_src_tg_gw'],
                dst_ip=cfg['mac_discovery_gw'],
                # will be None if no vlan tagging
                vlan=cfg['vlan_tag'])
                for cfg in stream_configs

            for attempt in range(self.config.generic_retry_count):
                    LOG.error(traceback.format_exc())
                for chain_id, mac in enumerate(dst_macs):
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     arp_record.dst_ip, arp_record.dst_mac)
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    LOG.info('Retrying ARP for: %s (retry %d/%d)',
                             unresolved, retry, self.config.generic_retry_count)
                    if retry < self.config.generic_retry_count:
                        time.sleep(self.config.generic_poll_sec)
                    LOG.error('ARP timed out for port %s (resolved %d out of %d)',

        # if the capture from the TRex console was started before the arp request step,
        # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
        # NOTE(review): the latency guard, the bidirectional/else branch lines
        # and the `mult` assignment appear elided in this view.
        intf_speed = self.generator_config.intf_speed
            # bidirectional: sum the pps of both directions
            r = utils.convert_rates(l2frame_size, rate, intf_speed)
            total_rate += int(r['rate_pps'])
            # unidirectional: only the first rate counts
            total_rate = utils.convert_rates(l2frame_size, rates[0], intf_speed)
        # rate must be enough for latency stream and at least 1 pps for base stream per chain
        required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
        result = utils.convert_rates(l2frame_size,
                                     {'rate_pps': required_rate},
        result['result'] = total_rate >= required_rate
        # when latency is disabled any rate is acceptable
        return {'result': True}
    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check

        raises TrafficGeneratorException when the requested rate cannot
        accommodate the latency streams.
        """
        # NOTE(review): several lines are elided in this view (the rate check
        # guard, streamblock init, the per-chain generate_streams kwargs and
        # the else branch of the vxlan port attr) — confirm.
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
            raise TrafficGeneratorException(
                'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        load=r['rate_percent']))
        self.l2_frame_size = l2frame_size
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
            if len(self.rates) > 1:
                # reverse direction streams only when a second rate is given
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            latency=bidirectional and latency,

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
    def clear_streamblock(self):
        """Clear all streams from TRex."""
        # NOTE(review): one statement appears elided here (likely resetting
        # self.rates) — confirm against the full file.
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')
898 def get_stats(self, if_stats=None):
899 """Get stats from Trex."""
900 stats = self.client.get_stats()
901 return self.extract_stats(stats, if_stats)
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        # NOTE(review): the def line of get_macs(self) is elided in this view.
        return [port['src_mac'] for port in self.port_info]
    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speed in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]
    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        # NOTE(review): a guard line appears elided here (the call below is
        # likely conditional on ports being set) — confirm.
            self.client.clear_stats()
922 def start_traffic(self):
923 """Start generating traffic in all ports."""
924 for port, rate in zip(self.port_handle, self.rates):
925 self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
927 def stop_traffic(self):
928 """Stop generating traffic."""
929 self.client.stop(ports=self.port_handle)
    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        # NOTE(review): a guard appears elided here (likely stopping a
        # previously started capture) — confirm against the full file.
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture \
            (rx_ports=self.port_handle, bpf_filter=bpf_filter)
    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        # NOTE(review): a guard on self.capture_id appears elided here — confirm.
            self.packet_list = []
            # fills self.packet_list with the packets captured so far
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)
    def stop_capture(self):
        """Stop capturing packets."""
        # NOTE(review): a guard on self.capture_id appears elided here — confirm.
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # if the capture from TRex console was started before the connectivity step,
            # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)
        """Cleanup Trex driver."""
        # NOTE(review): the def line of cleanup(self) and the surrounding
        # guard/try statements appear elided in this view.
            self.client.reset(self.port_handle)
            self.client.disconnect()
            # TRex does not like a reset while in disconnected state
971 def set_service_mode(self, enabled=True):
972 """Enable/disable the 'service_mode'."""
973 self.client.set_service_mode(ports=self.port_handle, enabled=enabled)