1 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
20 from itertools import count
21 from nfvbench.log import LOG
22 from nfvbench.specs import ChainType
23 from nfvbench.traffic_server import TRexTrafficServer
24 from nfvbench.utils import cast_integer
25 from nfvbench.utils import timeout
26 from nfvbench.utils import TimeoutError
27 from traffic_base import AbstractTrafficGenerator
28 from traffic_base import TrafficGeneratorException
29 import traffic_utils as utils
31 # pylint: disable=import-error
32 from trex_stl_lib.api import CTRexVmInsFixHwCs
33 from trex_stl_lib.api import Dot1Q
34 from trex_stl_lib.api import Ether
35 from trex_stl_lib.api import IP
36 from trex_stl_lib.api import STLClient
37 from trex_stl_lib.api import STLError
38 from trex_stl_lib.api import STLFlowLatencyStats
39 from trex_stl_lib.api import STLFlowStats
40 from trex_stl_lib.api import STLPktBuilder
41 from trex_stl_lib.api import STLScVmRaw
42 from trex_stl_lib.api import STLStream
43 from trex_stl_lib.api import STLTXCont
44 from trex_stl_lib.api import STLVmFixChecksumHw
45 from trex_stl_lib.api import STLVmFlowVar
46 from trex_stl_lib.api import STLVmFlowVarRepetableRandom
47 from trex_stl_lib.api import STLVmWrFlowVar
48 from trex_stl_lib.api import UDP
49 from trex_stl_lib.services.trex_stl_service_arp import STLServiceARP
52 # pylint: enable=import-error
class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    # Bit layout of a packet group ID (see get_pg_id for how these combine):
    CHAIN_PG_ID_MASK = 0x007F    # bits 0-6: chain ID (values 0-127, max 128 chains)
    PORT_PG_ID_MASK = 0x0080     # bit 7: port index (0 or 1)
    LATENCY_PG_ID_MASK = 0x0100  # bit 8: set when the stream is a latency stream
def __init__(self, traffic_client):
    """Initialize the TRex driver.

    traffic_client: presumably the owning traffic client object which provides
                    generator_config and config -- confirm against caller.
    """
    AbstractTrafficGenerator.__init__(self, traffic_client)
    # NOTE(review): several attribute initializations are elided in this listing
    # (e.g. self.client / self.port_handle, which other methods read) -- confirm
    # against the upstream source.
    self.chain_count = self.generator_config.service_chain_count
    self.capture_id = None
def get_version(self):
    """Return the version string reported by the connected TRex server."""
    server_version = self.client.get_server_version()
    return server_version
def get_pg_id(self, port, chain_id):
    """Map a port/chain pair to its pair of packet group IDs.

    port: port on which the stream is sent (0 or 1)
    chain_id: identifies to which chain the pg_id is associated (0 to 255)
    return: (pg_id, lat_pg_id) tuple

    The pg_id is assembled from 3 bit fields:
       0x007F: chain ID (bits 0-6, up to 128 chains)
       0x0080: port bit (bit 7)
       0x0100: latency bit (bit 8, set only in lat_pg_id)
    """
    base_id = (port * TRex.PORT_PG_ID_MASK) | chain_id
    latency_id = base_id | TRex.LATENCY_PG_ID_MASK
    return base_id, latency_id
def extract_stats(self, in_stats):
    """Extract stats from dict returned by Trex API.

    :param in_stats: dict as returned by TRex api
    """
    # replace NaN values so downstream arithmetic/serialization does not break
    utils.nan_replace(in_stats)
    # NOTE(review): initialization of the `result` dict is elided in this
    # listing -- confirm against the upstream source.
    # port_handles should have only 2 elements: [0, 1]
    # so (1 - ph) will be the index for the far end port
    for ph in self.port_handle:
        # NOTE(review): assignment of `stats` (this port's entry of in_stats)
        # is elided in this listing.
        far_end_stats = in_stats[1 - ph]
        # NOTE(review): the 'tx' dict wrapper for result[ph] is elided here.
        'total_pkts': cast_integer(stats['opackets']),
        'total_pkt_bytes': cast_integer(stats['obytes']),
        'pkt_rate': cast_integer(stats['tx_pps']),
        'pkt_bit_rate': cast_integer(stats['tx_bps'])
        # NOTE(review): the 'rx' dict wrapper is elided here.
        'total_pkts': cast_integer(stats['ipackets']),
        'total_pkt_bytes': cast_integer(stats['ibytes']),
        'pkt_rate': cast_integer(stats['rx_pps']),
        'pkt_bit_rate': cast_integer(stats['rx_bps']),
        # how many pkts were dropped in RX direction
        # need to take the tx counter on the far end port
        'dropped_pkts': cast_integer(
            far_end_stats['opackets'] - stats['ipackets'])
        # NOTE(review): closing braces of the per-port dicts are elided here.
        self.__combine_latencies(in_stats, result[ph]['rx'], ph)
    total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
    result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
    result["flow_stats"] = in_stats["flow_stats"]
    result["latency"] = in_stats["latency"]
    # NOTE(review): `return result` is elided at the end of this listing.
def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
    """Extract the aggregated stats for a given chain.

    trex_stats: stats as returned by get_stats()
    if_stats: a list of 2 interface stats to update (port 0 and 1)
    latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
               latencies[p] is the latency for packets sent on port p
               if there are no latency streams, the Latency instances are not modified
    chain_idx: chain index of the interface stats

    The packet counts include normal and latency streams.

    Trex returns flow stats keyed by pg_id; each entry holds per-port counters
    plus a 'total', e.g.:
       'flow_stats': {0: {'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                          'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                          'rx_bps': ..., 'tx_bps': ..., ...},
                      1: {...},
                      128: {...}, etc...
    the pg_id (0, 1, 128, ...) is the key of the dict and is obtained using
    get_pg_id().

    packet counters for a given stream sent on port p are reported as:
    - tx_pkts[p] on port p
    - rx_pkts[1-p] on the far end port

    This is a tricky/critical counter transposition operation because
    the results are grouped by port (not by stream):
       tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
       rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
       tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
       rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

    or using a more generic formula:
       tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
       rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

    the second formula is equivalent to
       rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

    If there are latency streams, those same counters need to be added in the same way.
    """
    for port in range(2):
        pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
        # accumulate both the normal and the latency stream counters
        for pid in [pg_id, lat_pg_id]:
            # NOTE(review): a line is elided here in this listing (presumably a
            # try: guard for pg_ids absent from the stats) -- confirm upstream.
            pg_stats = trex_stats['flow_stats'][pid]
            if_stats[port].tx += pg_stats['tx_pkts'][port]
            if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
        # NOTE(review): lines elided here (presumably the except handler above
        # and a try/guard for a missing latency entry) -- confirm upstream.
        lat = trex_stats['latency'][lat_pg_id]['latency']
        # dropped_pkts += lat['err_cntrs']['dropped']
        latencies[port].max_usec = int(round(lat['total_max']))
        latencies[port].min_usec = int(round(lat['total_min']))
        latencies[port].avg_usec = int(round(lat['average']))
def __combine_latencies(self, in_stats, results, port_handle):
    """Traverse TRex result dictionary and combines chosen latency stats."""
    # NOTE(review): initialization of `total_max` and `average` (and probably a
    # try: guard) is elided in this listing -- confirm upstream.
    total_min = float("inf")
    for chain_id in range(self.chain_count):
        _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
        lat = in_stats['latency'][lat_pg_id]['latency']
        # dropped_pkts += lat['err_cntrs']['dropped']
        # fold this chain's latency extrema/average into the running totals
        total_max = max(lat['total_max'], total_max)
        total_min = min(lat['total_min'], total_min)
        average += lat['average']
    if total_min == float("inf"):
        # NOTE(review): body elided (presumably resets total_min when no
        # latency sample was collected) -- confirm upstream.
    results['min_delay_usec'] = total_min
    results['max_delay_usec'] = total_max
    # average of per-chain averages
    results['avg_delay_usec'] = int(average / self.chain_count)
def _create_pkt(self, stream_cfg, l2frame_size):
    """Build the TRex packet template (Ether/[Dot1Q]/IP/UDP + flow-var VM) for one stream.

    stream_cfg: dict with mac/ip/udp/vlan fields for this stream
    l2frame_size: L2 frame size in bytes (string or int)
    return: an STLPktBuilder ready to attach to a stream
    """
    pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
    if stream_cfg['vlan_tag'] is not None:
        # 50 = 14 (Ethernet II) + 4 (Vlan tag) + 4 (CRC Checksum) + 20 (IPv4) + 8 (UDP)
        pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
        l2payload_size = int(l2frame_size) - 50
    # NOTE(review): the `else:` line is elided in this listing.
        # 46 = 14 (Ethernet II) + 4 (CRC Checksum) + 20 (IPv4) + 8 (UDP)
        l2payload_size = int(l2frame_size) - 46
    payload = 'x' * l2payload_size
    # NOTE(review): initialization of `udp_args` (empty dict) is elided here.
    if stream_cfg['udp_src_port']:
        udp_args['sport'] = int(stream_cfg['udp_src_port'])
    if stream_cfg['udp_dst_port']:
        udp_args['dport'] = int(stream_cfg['udp_dst_port'])
    pkt_base /= IP() / UDP(**udp_args)
    if stream_cfg['ip_addrs_step'] == 'random':
        # random IP ranges: use the repeatable-random TRex flow var so runs
        # are reproducible for a given seed
        src_fv = STLVmFlowVarRepetableRandom(
            # NOTE(review): name/size kwargs elided in this listing.
            min_value=stream_cfg['ip_src_addr'],
            max_value=stream_cfg['ip_src_addr_max'],
            seed=random.randint(0, 32767),
            limit=stream_cfg['ip_src_count'])
        dst_fv = STLVmFlowVarRepetableRandom(
            # NOTE(review): name/size kwargs elided in this listing.
            min_value=stream_cfg['ip_dst_addr'],
            max_value=stream_cfg['ip_dst_addr_max'],
            seed=random.randint(0, 32767),
            limit=stream_cfg['ip_dst_count'])
    # NOTE(review): the `else:` line (incremental step case) is elided here.
        src_fv = STLVmFlowVar(
            # NOTE(review): name/size/op kwargs elided in this listing.
            min_value=stream_cfg['ip_src_addr'],
            max_value=stream_cfg['ip_src_addr'],
            # NOTE(review): max_value equals min_value here while the dst var
            # below uses ip_dst_addr_max -- looks suspicious; confirm whether
            # this should be ip_src_addr_max upstream.
            step=stream_cfg['ip_addrs_step'])
        dst_fv = STLVmFlowVar(
            # NOTE(review): name/size/op kwargs elided in this listing.
            min_value=stream_cfg['ip_dst_addr'],
            max_value=stream_cfg['ip_dst_addr_max'],
            step=stream_cfg['ip_addrs_step'])
    # NOTE(review): opening of the `vm_param` list (and the src_fv/dst_fv
    # entries) is elided in this listing.
        STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP.src"),
        STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP.dst"),
        # recompute IP (and UDP) checksums in hardware after rewriting addresses
        STLVmFixChecksumHw(l3_offset="IP",
                           # NOTE(review): l4_offset kwarg elided in this listing.
                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
    # NOTE(review): closing bracket of `vm_param` elided in this listing.
    return STLPktBuilder(pkt=pkt_base / payload, vm=STLScVmRaw(vm_param))
def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True):
    """Create a list of streams corresponding to a given chain and stream config.

    port: port where the streams originate (0 or 1)
    chain_id: the chain to which the streams are associated to
    stream_cfg: stream configuration
    l2frame: L2 frame size
    latency: if True also create a latency stream
    """
    # NOTE(review): initialization of the `streams` list is elided in this listing.
    pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
    if l2frame == 'IMIX':
        # with a VLAN tag the smallest IMIX frame must grow by 4 bytes
        min_size = 64 if stream_cfg['vlan_tag'] is None else 68
        self.adjust_imix_min_size(min_size)
        # one continuous stream per IMIX size, weighted by its ratio
        for ratio, l2_frame_size in zip(self.imix_ratios, self.imix_l2_sizes):
            pkt = self._create_pkt(stream_cfg, l2_frame_size)
            streams.append(STLStream(packet=pkt,
                                     flow_stats=STLFlowStats(pg_id=pg_id),
                                     mode=STLTXCont(pps=ratio)))
        # NOTE(review): the `if latency:` guard is elided in this listing.
            # for IMIX, the latency packets have the average IMIX packet size
            pkt = self._create_pkt(stream_cfg, self.imix_avg_l2_size)
    # NOTE(review): the `else:` line (fixed frame size case) is elided here.
        pkt = self._create_pkt(stream_cfg, l2frame)
        streams.append(STLStream(packet=pkt,
                                 flow_stats=STLFlowStats(pg_id=pg_id),
                                 # NOTE(review): mode kwarg and the following
                                 # `if latency:` guard are elided in this listing.
        # latency streams use a dedicated pg_id and fixed low rate
        streams.append(STLStream(packet=pkt,
                                 flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id),
                                 mode=STLTXCont(pps=self.LATENCY_PPS)))
    # NOTE(review): `return streams` is elided at the end of this listing.
def __connect(self, client):
    """Connect the given STLClient to the TRex server.

    NOTE(review): the method body (presumably client.connect() under a timeout
    decorator) is elided in this listing -- confirm against upstream source.
    """
def __connect_after_start(self):
    """Retry connecting to a freshly started local TRex server."""
    # after start, Trex may take a bit of time to initialize
    # so we need to retry a few times
    for it in xrange(self.config.generic_retry_count):
        # NOTE(review): lines elided here (presumably a try: and a sleep
        # before the connect attempt) -- confirm upstream.
        self.client.connect()
        # NOTE(review): a line is elided here (presumably `break` on success).
        except Exception as ex:
            if it == (self.config.generic_retry_count - 1):
                # NOTE(review): line elided (presumably re-raise after the
                # last attempt) -- confirm upstream.
            LOG.info("Retrying connection to TRex (%s)...", ex.message)
"""Connect to the TRex server."""
# NOTE(review): the enclosing `def connect(self):` header is not visible in
# this listing; the lines below are the body of the connect() method.
server_ip = self.generator_config.ip
LOG.info("Connecting to TRex (%s)...", server_ip)
# Connect to TRex server
self.client = STLClient(server=server_ip)
# NOTE(review): a `try:` line is elided here (required by the except below).
self.__connect(self.client)
except (TimeoutError, STLError) as e:
    # a local TRex server can be started on demand when the target is localhost
    if server_ip == '127.0.0.1':
        # NOTE(review): lines elided here (presumably an inner try:).
        self.__start_server()
        self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                # NOTE(review): initialization of `last_size` is elided here.
                for _ in xrange(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        # NOTE(review): loop bookkeeping lines elided here
                        # (presumably break / last_size update / sleep).
                    with open(logpath, 'r') as f:
                        # NOTE(review): reading of the log tail into
                        # `message` is elided in this listing.
                raise TrafficGeneratorException(message)
raise TrafficGeneratorException(e.message)
ports = list(self.generator_config.ports)
self.port_handle = ports
self.client.reset(ports)
# Read HW information from each port
# this returns an array of dict (1 per port)
# Example of output for Intel XL710
# [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
#   'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
#   u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
#   u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
#   u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
#   'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
#   u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
#   'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
#   'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
#   'layer_mode': 'Ethernet', u'numa': 0}, ...]
self.port_info = self.client.get_port_info(ports)
LOG.info('Connected to TRex')
for id, port in enumerate(self.port_info):
    LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
             id, port['description'], port['speed'], port['src_mac'],
             port['pci_addr'], port['driver'])
# Make sure the 2 ports have the same speed
if self.port_info[0]['speed'] != self.port_info[1]['speed']:
    raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                    (self.port_info[0]['speed'],
                                     self.port_info[1]['speed']))
if self.config.service_chain == ChainType.EXT and not self.config.no_arp:
    # NOTE(review): body elided in this listing (presumably L3 mode setup
    # for EXT chains when ARP is enabled) -- confirm upstream.
def __set_l3_mode(self):
    """Set L3 mode on all ports (needed for ARP resolution through gateways)."""
    # service mode must be enabled to change port L2/L3 configuration
    self.client.set_service_mode(ports=self.port_handle, enabled=True)
    for port, device in zip(self.port_handle, self.generator_config.devices):
        # NOTE(review): a `try:` line is elided here in this listing.
        self.client.set_l3_mode(port=port,
                                src_ipv4=device.tg_gateway_ip,
                                dst_ipv4=device.dst.gateway_ip,
                                vlan=device.vlan_tag if device.vlan_tagging else None)
        # NOTE(review): the matching except clause is elided here.
        # TRex tries to resolve ARP already, doesn't have to be successful yet
        # NOTE(review): the except body is elided in this listing.
    self.client.set_service_mode(ports=self.port_handle, enabled=False)
def __set_l2_mode(self):
    """Program every port in L2 mode with the destination MAC of its streams."""
    client = self.client
    # port config changes require service mode to be on
    client.set_service_mode(ports=self.port_handle, enabled=True)
    for port_id, device in zip(self.port_handle, self.generator_config.devices):
        for stream_cfg in device.get_stream_configs():
            client.set_l2_mode(port=port_id, dst_mac=stream_cfg['mac_dst'])
    client.set_service_mode(ports=self.port_handle, enabled=False)
def __start_server(self):
    """Launch a local TRex server process using the generator configuration."""
    TRexTrafficServer().run_server(self.generator_config)
def resolve_arp(self):
    """Resolve all configured remote IP addresses.

    return: None if ARP failed to resolve for all IP addresses
            else a dict of list of dest macs indexed by port#
            the dest macs in the list are indexed by the chain id
    """
    # ARP polling requires service mode
    self.client.set_service_mode(ports=self.port_handle)
    LOG.info('Polling ARP until successful...')
    # NOTE(review): initialization of the result dict (`arps`) is elided here.
    for port, device in zip(self.port_handle, self.generator_config.devices):
        # there should be 1 stream config per chain
        stream_configs = device.get_stream_configs()
        chain_count = len(stream_configs)
        ctx = self.client.create_service_ctx(port=port)
        # all dest macs on this port indexed by chain ID
        dst_macs = [None] * chain_count
        # NOTE(review): a line is elided here (presumably `dst_macs_count = 0`).
        # the index in the list is the chain id
        # NOTE(review): the opening of the STLServiceARP list comprehension is
        # elided in this listing.
                      src_ip=cfg['ip_src_tg_gw'],
                      dst_ip=cfg['mac_discovery_gw'],
                      vlan=device.vlan_tag if device.vlan_tagging else None)
        for cfg in stream_configs()
        # NOTE(review): `stream_configs` was assigned above from
        # get_stream_configs(); calling it again with () looks inconsistent
        # with the len() call above -- confirm upstream.
        for attempt in range(self.config.generic_retry_count):
            # NOTE(review): try/except around the service context run is
            # elided in this listing.
            LOG.error(traceback.format_exc())
            # NOTE(review): lines elided (presumably resets of the resolved
            # count and the `unresolved` list before scanning results).
            for chain_id, mac in enumerate(dst_macs):
                # NOTE(review): a guard line is elided here (presumably
                # `if not mac:` to skip already-resolved chains).
                arp_record = arps[chain_id].get_record()
                if arp_record.dest_mac:
                    # NOTE(review): 'dest_mac' (tested) vs 'dst_mac' (read)
                    # attribute naming differs -- confirm the STLServiceARP
                    # record API upstream.
                    dst_macs[chain_id] = arp_record.dst_mac
                    LOG.info(' ARP: port=%d chain=%d IP=%s -> MAC=%s',
                             # NOTE(review): the port/chain_id format args
                             # line is elided in this listing.
                             arp_record.dst_ip, arp_record.dst_mac)
                    # NOTE(review): the `else:` branch line is elided here.
                    unresolved.append(arp_record.dst_ip)
            if dst_macs_count == chain_count:
                arps[port] = dst_macs
                LOG.info('ARP resolved successfully for port %s', port)
                # NOTE(review): break/retry bookkeeping lines are elided here.
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
                # NOTE(review): the `else:` (retries exhausted) line is elided.
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          # NOTE(review): remaining format args elided.
    self.client.set_service_mode(ports=self.port_handle, enabled=False)
    if len(arps) == len(self.port_handle):
        # NOTE(review): `return arps` is elided; presumably returns None when
        # some port failed to resolve -- confirm upstream.
def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
    """Check if rate provided by user is above requirements. Applies only if latency is True."""
    intf_speed = self.generator_config.intf_speed
    # NOTE(review): several lines are elided in this listing (presumably the
    # `if latency:` guard, the bidirectional `mult` selection and the loop
    # header accumulating total_rate over both rates) -- confirm upstream.
        r = utils.convert_rates(l2frame_size, rate, intf_speed)
        total_rate += int(r['rate_pps'])
    # NOTE(review): else-branch lines elided (single-direction case).
    total_rate = utils.convert_rates(l2frame_size, rates[0], intf_speed)
    # rate must be enough for latency stream and at least 1 pps for base stream per chain
    required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
    result = utils.convert_rates(l2frame_size,
                                 {'rate_pps': required_rate},
                                 # NOTE(review): intf_speed arg line elided.
    result['result'] = total_rate >= required_rate
    # NOTE(review): `return result` is elided; the fallback below presumably
    # applies when latency measurement is disabled.
    return {'result': True}
def create_traffic(self, l2frame_size, rates, bidirectional, latency=True):
    """Program all the streams in Trex server.

    l2frame_size: L2 frame size or IMIX
    rates: a list of 2 rates to run each direction
           each rate is a dict like {'rate_pps': '10kpps'}
    bidirectional: True if bidirectional
    latency: True if latency measurement is needed
    """
    # verify the requested rate leaves room for the latency streams
    r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
    # NOTE(review): the guard line is elided here (presumably
    # `if not r['result']:`) -- confirm upstream.
    raise TrafficGeneratorException(
        'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
        .format(pps=r['rate_pps'],
                # NOTE(review): the bps format arg line is elided here.
                load=r['rate_percent']))
    # a dict of list of streams indexed by port#
    # in case of fixed size, has self.chain_count * 2 * 2 streams
    # (1 normal + 1 latency stream per direction per chain)
    # for IMIX, has self.chain_count * 2 * 4 streams
    # (3 normal + 1 latency stream per direction per chain)
    # NOTE(review): initialization of the `streamblock` dict is elided here.
    for port in self.port_handle:
        streamblock[port] = []
    stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
    self.rates = [utils.to_rate_str(rate) for rate in rates]
    for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
        streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                    # NOTE(review): remaining
                                                    # args elided (chain_id,
                                                    # fwd_stream_cfg, ...).
        # reverse direction streams only when a second rate is provided
        if len(self.rates) > 1:
            streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                        # NOTE(review): args
                                                        # elided (chain_id,
                                                        # rev_stream_cfg, ...).
                                                        latency=bidirectional and latency))
    for port in self.port_handle:
        self.client.add_streams(streamblock[port], ports=port)
        LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
def clear_streamblock(self):
    """Clear all streams from TRex."""
    # NOTE(review): one line is elided here in this listing (presumably a
    # reset of self.rates) -- confirm upstream.
    # reset removes all streams and stats on the ports
    self.client.reset(self.port_handle)
    LOG.info('Cleared all existing streams')
def get_stats(self):
    """Get stats from Trex.

    return: the stats dict as returned by extract_stats() (per-port tx/rx
            counters, flow_stats and latency entries)
    """
    # the def header was missing from the mangled listing; restored here so
    # the visible body (docstring + 2 statements) is a valid method again
    stats = self.client.get_stats()
    return self.extract_stats(stats)
def get_macs(self):
    """Return the Trex local port MAC addresses.

    return: a list of MAC addresses indexed by the port#
    """
    # the def header was missing from the mangled listing; restored here so
    # the visible body (docstring + return) is a valid method again
    return [port['src_mac'] for port in self.port_info]
def get_port_speed_gbps(self):
    """Return the Trex local port speeds.

    return: a list of speed in Gbps indexed by the port#
    """
    # docstring fixed: it was copy-pasted from get_macs and wrongly said
    # "MAC addresses" while this method returns the per-port speeds
    return [port['speed'] for port in self.port_info]
def clear_stats(self):
    """Clear all stats in the traffic generator."""
    # NOTE(review): one line is elided here in this listing (presumably a
    # `if self.port_handle:` guard) -- confirm upstream.
    self.client.clear_stats()
def start_traffic(self):
    """Start generating traffic on all ports, each at its configured rate."""
    for port_id, rate_str in zip(self.port_handle, self.rates):
        self.client.start(ports=port_id,
                          mult=rate_str,
                          duration=self.config.duration_sec,
                          force=True)
def stop_traffic(self):
    """Halt traffic generation on every port handle."""
    self.client.stop(ports=self.port_handle)
def start_capture(self):
    """Capture all packets on both ports that are unicast to us."""
    # NOTE(review): lines elided here in this listing (presumably a guard when
    # a capture is already active) -- confirm upstream.
    # Need to filter out unwanted packets so we do not end up counting
    # src MACs of frames that are not unicast to us
    src_mac_list = self.get_macs()
    bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
    # ports must be set in service in order to enable capture
    self.client.set_service_mode(ports=self.port_handle)
    self.capture_id = self.client.start_capture(rx_ports=self.port_handle,
                                                bpf_filter=bpf_filter)
def fetch_capture_packets(self):
    """Fetch capture packets in capture mode."""
    # NOTE(review): one line is elided here in this listing -- confirm upstream.
    # captured packets are accumulated into self.packet_list
    self.packet_list = []
    self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                      output=self.packet_list)
def stop_capture(self):
    """Stop capturing packets."""
    # NOTE(review): one line is elided here in this listing (presumably a
    # guard when no capture is active) -- confirm upstream.
    self.client.stop_capture(capture_id=self.capture_id['id'])
    self.capture_id = None
    # leave service mode once capture is done
    self.client.set_service_mode(ports=self.port_handle, enabled=False)
651 """Cleanup Trex driver."""
654 self.client.reset(self.port_handle)
655 self.client.disconnect()
657 # TRex does not like a reset while in disconnected state