NFVBENCH-187: Augment --l2-loopback command line option capabilities
1 # Copyright 2016 Cisco Systems, Inc.  All rights reserved.
2 #
3 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
4 #    not use this file except in compliance with the License. You may obtain
5 #    a copy of the License at
6 #
7 #         http://www.apache.org/licenses/LICENSE-2.0
8 #
9 #    Unless required by applicable law or agreed to in writing, software
10 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 #    License for the specific language governing permissions and limitations
13 #    under the License.
14 """Driver module for TRex traffic generator."""
15
16 import math
17 import os
18 import sys
19 import random
20 import time
21 import traceback
22 from functools import reduce
23
24 from itertools import count
25 # pylint: disable=import-error
26 from scapy.contrib.mpls import MPLS  # flake8: noqa
27 # pylint: enable=import-error
28 from nfvbench.log import LOG
29 from nfvbench.traffic_server import TRexTrafficServer
30 from nfvbench.utils import cast_integer
31 from nfvbench.utils import timeout
32 from nfvbench.utils import TimeoutError
33
34 from hdrh.histogram import HdrHistogram
35
36 # pylint: disable=import-error
37 from trex.common.services.trex_service_arp import ServiceARP
38 from trex.stl.api import bind_layers
39 from trex.stl.api import CTRexVmInsFixHwCs
40 from trex.stl.api import Dot1Q
41 from trex.stl.api import Ether
42 from trex.stl.api import FlagsField
43 from trex.stl.api import IP
44 from trex.stl.api import Packet
45 from trex.stl.api import STLClient
46 from trex.stl.api import STLError
47 from trex.stl.api import STLFlowLatencyStats
48 from trex.stl.api import STLFlowStats
49 from trex.stl.api import STLPktBuilder
50 from trex.stl.api import STLScVmRaw
51 from trex.stl.api import STLStream
52 from trex.stl.api import STLTXCont
53 from trex.stl.api import STLVmFixChecksumHw
54 from trex.stl.api import STLVmFixIpv4
55 from trex.stl.api import STLVmFlowVar
56 from trex.stl.api import STLVmFlowVarRepeatableRandom
57 from trex.stl.api import STLVmTupleGen
58 from trex.stl.api import STLVmWrFlowVar
59 from trex.stl.api import ThreeBytesField
60 from trex.stl.api import UDP
61 from trex.stl.api import XByteField
62
63 # pylint: enable=import-error
64
65 from .traffic_base import AbstractTrafficGenerator
66 from .traffic_base import TrafficGeneratorException
67 from . import traffic_utils as utils
68 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
69 from .traffic_utils import IMIX_L2_SIZES
70 from .traffic_utils import IMIX_RATIOS
71
72 class VXLAN(Packet):
73     """VxLAN class."""
74
75     _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
76     name = "VXLAN"
77     fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
78                    ThreeBytesField("vni", 0),
79                    XByteField("reserved", 0x00)]
80
81     def mysummary(self):
82         """Summary."""
83         return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
84
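# Example of how this header is used further below in TRex._create_pkt() for a
# VXLAN-encapsulated flow (sketch, field values illustrative):
#   Ether(outer) / [Dot1Q] / IP(outer) / UDP(dport=4789) / VXLAN(vni=...) / Ether(inner) / ...
# i.e. the tenant frame is carried as the inner payload of a standard VXLAN overlay packet.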
85 class TRex(AbstractTrafficGenerator):
86     """TRex traffic generator driver."""
87
88     LATENCY_PPS = 1000
89     CHAIN_PG_ID_MASK = 0x007F
90     PORT_PG_ID_MASK = 0x0080
91     LATENCY_PG_ID_MASK = 0x0100
92
93     def __init__(self, traffic_client):
94         """Trex driver."""
95         AbstractTrafficGenerator.__init__(self, traffic_client)
96         self.client = None
97         self.id = count()
98         self.port_handle = []
99         self.chain_count = self.generator_config.service_chain_count
100         self.rates = []
101         self.capture_id = None
102         self.packet_list = []
103         self.l2_frame_size = 0
104
105     def get_version(self):
106         """Get the Trex version."""
107         return self.client.get_server_version() if self.client else ''
108
109     def get_pg_id(self, port, chain_id):
110         """Calculate the packet group IDs to use for a given port/stream type/chain_id.
111
112         port: 0 or 1
113         chain_id: the chain this pg_id is associated with (0 to 127)
114         return: pg_id, lat_pg_id
115
116         We use a bit mask to set up the 3 fields:
117         0x007F: chain ID (7 bits for a max of 128 chains)
118         0x0080: port bit
119         0x0100: latency bit
120         """
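        # Worked example (illustrative values): for port=1 and chain_id=5,
        #   pg_id     = 1 * 0x0080 | 5      = 0x0085 (133)
        #   lat_pg_id = 0x0085    | 0x0100  = 0x0185 (389)
        # so the data and latency streams of a given port/chain always map to
        # distinct packet group IDs.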
121         pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
122         return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
123
124     def extract_stats(self, in_stats, ifstats):
125         """Extract stats from dict returned by Trex API.
126
127         :param in_stats: dict as returned by TRex api
128         """
129         utils.nan_replace(in_stats)
130         # LOG.debug(in_stats)
131
132         result = {}
133         # port_handles should have only 2 elements: [0, 1]
134         # so (1 - ph) will be the index for the far end port
135         for ph in self.port_handle:
136             stats = in_stats[ph]
137             far_end_stats = in_stats[1 - ph]
138             result[ph] = {
139                 'tx': {
140                     'total_pkts': cast_integer(stats['opackets']),
141                     'total_pkt_bytes': cast_integer(stats['obytes']),
142                     'pkt_rate': cast_integer(stats['tx_pps']),
143                     'pkt_bit_rate': cast_integer(stats['tx_bps'])
144                 },
145                 'rx': {
146                     'total_pkts': cast_integer(stats['ipackets']),
147                     'total_pkt_bytes': cast_integer(stats['ibytes']),
148                     'pkt_rate': cast_integer(stats['rx_pps']),
149                     'pkt_bit_rate': cast_integer(stats['rx_bps']),
150                     # how many pkts were dropped in RX direction
151                     # need to take the tx counter on the far end port
152                     'dropped_pkts': cast_integer(
153                         far_end_stats['opackets'] - stats['ipackets'])
154                 }
155             }
156             self.__combine_latencies(in_stats, result[ph]['rx'], ph)
157
158         total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
159         result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
160         # actual offered tx rate in bps
161         avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
162         total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
163         result['offered_tx_rate_bps'] = total_tx_bps
164
165         result.update(self.get_theoretical_rates(avg_packet_size))
166
167         result["flow_stats"] = in_stats["flow_stats"]
168         result["latency"] = in_stats["latency"]
169
170         # Merge HDRHistogram to have an overall value for all chains and ports
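        # The per-pg_id 'hdrh' fields are base64-encoded HdrHistogram blobs; they are
        # decoded, added together into a single histogram and re-encoded below.
        # A consumer can then query the merged result, e.g. (sketch, assuming the
        # hdrh python API):
        #   merged = HdrHistogram.decode(result['hdrh'])
        #   p99 = merged.get_value_at_percentile(99.0)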
171         try:
172             hdrh_list = []
173             if ifstats:
174                 for chain_id, _ in enumerate(ifstats):
175                     for ph in self.port_handle:
176                         _, lat_pg_id = self.get_pg_id(ph, chain_id)
177                         hdrh_list.append(
178                             HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
179             else:
180                 for pg_id in in_stats['latency']:
181                     if pg_id != 'global':
182                         hdrh_list.append(
183                             HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
184
185             def add_hdrh(x, y):
186                 x.add(y)
187                 return x
188             decoded_hdrh = reduce(add_hdrh, hdrh_list)
189             result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
190         except KeyError:
191             pass
192
193         return result
194
195     def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
196         """Extract the aggregated stats for a given chain.
197
198         trex_stats: stats as returned by get_stats()
199         if_stats: a list of 2 interface stats to update (port 0 and 1)
200         latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
201                    latencies[p] is the latency for packets sent on port p
202                    if there are no latency streams, the Latency instances are not modified
203         chain_idx: chain index of the interface stats
204
205         The packet counts include normal and latency streams.
206
207         Trex returns flows stats as follows:
208
209         'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
210                    'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
211                    'rx_bytes': {0: nan, 1: nan, 'total': nan},
212                    'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
213                    'rx_pps': {0: 0, 1: 0, 'total': 0},
214                    'tx_bps': {0: 0, 1: 0, 'total': 0},
215                    'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
216                    'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
217                    'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
218                    'tx_pps': {0: 0, 1: 0, 'total': 0}},
219                1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
220                    'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
221                    'rx_bytes': {0: nan, 1: nan, 'total': nan},
222                    'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
223                    'rx_pps': {0: 0, 1: 0, 'total': 0},
224                    'tx_bps': {0: 0, 1: 0, 'total': 0},
225                    'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
226                    'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
227                    'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
228                    'tx_pps': {0: 0, 1: 0, 'total': 0}},
229                 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
230                 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
231                 'rx_bytes': {0: nan, 1: nan, 'total': nan},
232                 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
233                 'rx_pps': {0: 0, 1: 0, 'total': 0},
234                 'tx_bps': {0: 0, 1: 0, 'total': 0},
235                 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
236                 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
237                 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
238                 'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...
239
240         the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
241         get_pg_id() method.
242         packet counters for a given stream sent on port p are reported as:
243         - tx_pkts[p] on port p
244         - rx_pkts[1-p] on the far end port
245
246         This is a tricky/critical counter transposition operation because
247         the results are grouped by port (not by stream):
248         tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
249         rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
250         tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
251         rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
252
253         or using a more generic formula:
254         tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
255         rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
256
257         the second formula is equivalent to
258         rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
259
260         If there are latency streams, those same counters need to be added in the same way
261         """
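        # Worked example (illustrative) for chain_idx=1:
        #   pg_id(port=0, chain=1) = 0x01 (1),  pg_id(port=1, chain=1) = 0x81 (129)
        #   if_stats[0].tx += flow_stats[1]['tx_pkts'][0]
        #   if_stats[0].rx += flow_stats[129]['rx_pkts'][0]
        # and symmetrically for port 1; the latency pg_ids (0x101, 0x181) are
        # added on top when latency streams are present.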
262         def get_latency(lval):
263             try:
264                 return int(round(lval))
265             except ValueError:
266                 return 0
267
268         for ifs in if_stats:
269             ifs.tx = ifs.rx = 0
270         for port in range(2):
271             pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
272             for pid in [pg_id, lat_pg_id]:
273                 try:
274                     pg_stats = trex_stats['flow_stats'][pid]
275                     if_stats[port].tx += pg_stats['tx_pkts'][port]
276                     if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
277                 except KeyError:
278                     pass
279             try:
280                 lat = trex_stats['latency'][lat_pg_id]['latency']
281                 # dropped_pkts += lat['err_cntrs']['dropped']
282                 latencies[port].max_usec = get_latency(lat['total_max'])
283                 if math.isnan(lat['total_min']):
284                     latencies[port].min_usec = 0
285                     latencies[port].avg_usec = 0
286                 else:
287                     latencies[port].min_usec = get_latency(lat['total_min'])
288                     latencies[port].avg_usec = get_latency(lat['average'])
289                 # pick up the HDR histogram if present (otherwise will raise KeyError)
290                 latencies[port].hdrh = lat['hdrh']
291             except KeyError:
292                 pass
293
294     def __combine_latencies(self, in_stats, results, port_handle):
295         """Traverse the TRex result dictionary and combine the chosen latency stats.
296
297           example of latency dict returned by trex (2 chains):
298          'latency': {256: {'err_cntrs': {'dropped': 0,
299                                  'dup': 0,
300                                  'out_of_order': 0,
301                                  'seq_too_high': 0,
302                                  'seq_too_low': 0},
303                             'latency': {'average': 26.5,
304                                         'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
305                                         'histogram': {20: 303,
306                                                         30: 320,
307                                                         40: 300,
308                                                         50: 73,
309                                                         60: 4,
310                                                         70: 1},
311                                         'jitter': 14,
312                                         'last_max': 63,
313                                         'total_max': 63,
314                                         'total_min': 20}},
315                     257: {'err_cntrs': {'dropped': 0,
316                                         'dup': 0,
317                                         'out_of_order': 0,
318                                         'seq_too_high': 0,
319                                         'seq_too_low': 0},
320                             'latency': {'average': 29.75,
321                                         'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
322                                         'histogram': {20: 261,
323                                                         30: 431,
324                                                         40: 3,
325                                                         50: 80,
326                                                         60: 225},
327                                         'jitter': 23,
328                                         'last_max': 67,
329                                         'total_max': 67,
330                                         'total_min': 20}},
331                     384: {'err_cntrs': {'dropped': 0,
332                                         'dup': 0,
333                                         'out_of_order': 0,
334                                         'seq_too_high': 0,
335                                         'seq_too_low': 0},
336                             'latency': {'average': 18.0,
337                                         'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
338                                         'histogram': {20: 987, 30: 14},
339                                         'jitter': 0,
340                                         'last_max': 34,
341                                         'total_max': 34,
342                                         'total_min': 20}},
343                     385: {'err_cntrs': {'dropped': 0,
344                                     'dup': 0,
345                                     'out_of_order': 0,
346                                     'seq_too_high': 0,
347                                     'seq_too_low': 0},
348                             'latency': {'average': 19.0,
349                                         'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
350                                         'histogram': {20: 989, 30: 11},
351                                         'jitter': 0,
352                                         'last_max': 38,
353                                         'total_max': 38,
354                                         'total_min': 20}},
355                     'global': {'bad_hdr': 0, 'old_flow': 0}},
356         """
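        # In the example above (2 chains), the keys map back to get_pg_id():
        #   256/257 = latency pg_ids of chains 0/1 on port 0 (0x100, 0x101)
        #   384/385 = latency pg_ids of chains 0/1 on port 1 (0x180, 0x181)
        # This loop only visits the lat_pg_ids of the requested port_handle.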
357         total_max = 0
358         average = 0
359         total_min = float("inf")
360         for chain_id in range(self.chain_count):
361             try:
362                 _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
363                 lat = in_stats['latency'][lat_pg_id]['latency']
364                 # dropped_pkts += lat['err_cntrs']['dropped']
365                 total_max = max(lat['total_max'], total_max)
366                 total_min = min(lat['total_min'], total_min)
367                 average += lat['average']
368             except KeyError:
369                 pass
370         if total_min == float("inf"):
371             total_min = 0
372         results['min_delay_usec'] = total_min
373         results['max_delay_usec'] = total_max
374         results['avg_delay_usec'] = int(average / self.chain_count)
375
376     def _bind_vxlan(self):
377         bind_layers(UDP, VXLAN, dport=4789)
378         bind_layers(VXLAN, Ether)
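        # With these bindings scapy recognizes UDP port 4789 payloads as VXLAN and the
        # VXLAN payload as an inner Ethernet frame, so VXLAN-encapsulated packets are
        # dissected with the proper inner layers.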
379
380     def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
381         """Create a packet of given size.
382
383         l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
384         """
385         # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
386         frame_size = int(l2frame_size) - 4
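        # e.g. (illustrative): l2frame_size=64 gives a 60-byte scapy packet, padded
        # with 'x' bytes at the end of this method; the 4-byte FCS is appended on the wire.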
387         vm_param = []
388         if stream_cfg['vxlan'] is True:
389             self._bind_vxlan()
390             encap_level = '1'
391             pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
392             if stream_cfg['vtep_vlan'] is not None:
393                 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
394             pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
395             pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
396             pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
397             pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
398             # need to randomize the outer header UDP src port based on flow
399             vxlan_udp_src_fv = STLVmFlowVar(
400                 name="vxlan_udp_src",
401                 min_value=1337,
402                 max_value=32767,
403                 size=2,
404                 op="random")
405             vm_param = [vxlan_udp_src_fv,
406                         STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
407         elif stream_cfg['mpls'] is True:
408             encap_level = '0'
409             pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
410             if stream_cfg['vtep_vlan'] is not None:
411                 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
412             if stream_cfg['mpls_outer_label'] is not None:
413                 pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
414             if stream_cfg['mpls_inner_label'] is not None:
415                 pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
416             #  Flow stats and MPLS labels randomization TBD
417             pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
418         else:
419             encap_level = '0'
420             pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
421
422         if stream_cfg['vlan_tag'] is not None:
423             pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
424
425         udp_args = {}
426         if stream_cfg['udp_src_port']:
427             udp_args['sport'] = int(stream_cfg['udp_src_port'])
428             if stream_cfg['udp_port_step'] == 'random':
429                 step = 1
430             else:
431                 step = stream_cfg['udp_port_step']
432             udp_args['sport_step'] = int(step)
433             udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
434         if stream_cfg['udp_dst_port']:
435             udp_args['dport'] = int(stream_cfg['udp_dst_port'])
436             if stream_cfg['udp_port_step'] == 'random':
437                 step = 1
438             else:
439                 step = stream_cfg['udp_port_step']
440             udp_args['dport_step'] = int(step)
441             udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
442
443         pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
444                     UDP(dport=udp_args['dport'], sport=udp_args['sport'])
445
446         # STLVmTupleGen needs a flow count >= the number of cores used by TRex; if lower, use STLVmFlowVar
447         if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
448                 stream_cfg['count'] >= self.generator_config.cores:
449             src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
450                                    ip_max=stream_cfg['ip_src_addr_max'],
451                                    port_min=udp_args['sport'],
452                                    port_max=udp_args['sport_max'],
453                                    name="tuple_src",
454                                    limit_flows=stream_cfg['count'])
455             dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
456                                    ip_max=stream_cfg['ip_dst_addr_max'],
457                                    port_min=udp_args['dport'],
458                                    port_max=udp_args['dport_max'],
459                                    name="tuple_dst",
460                                    limit_flows=stream_cfg['count'])
461             vm_param = [
462                 src_fv,
463                 STLVmWrFlowVar(fv_name="tuple_src.ip",
464                                pkt_offset="IP:{}.src".format(encap_level)),
465                 STLVmWrFlowVar(fv_name="tuple_src.port",
466                                pkt_offset="UDP:{}.sport".format(encap_level)),
467                 dst_fv,
468                 STLVmWrFlowVar(fv_name="tuple_dst.ip",
469                                pkt_offset="IP:{}.dst".format(encap_level)),
470                 STLVmWrFlowVar(fv_name="tuple_dst.port",
471                                pkt_offset="UDP:{}.dport".format(encap_level)),
472             ]
473         else:
474             if disable_random_latency_flow:
475                 src_fv_ip = STLVmFlowVar(
476                     name="ip_src",
477                     min_value=stream_cfg['ip_src_addr'],
478                     max_value=stream_cfg['ip_src_addr'],
479                     size=4)
480                 dst_fv_ip = STLVmFlowVar(
481                     name="ip_dst",
482                     min_value=stream_cfg['ip_dst_addr'],
483                     max_value=stream_cfg['ip_dst_addr'],
484                     size=4)
485             elif stream_cfg['ip_addrs_step'] == 'random':
486                 src_fv_ip = STLVmFlowVarRepeatableRandom(
487                     name="ip_src",
488                     min_value=stream_cfg['ip_src_addr'],
489                     max_value=stream_cfg['ip_src_addr_max'],
490                     size=4,
491                     seed=random.randint(0, 32767),
492                     limit=stream_cfg['ip_src_count'])
493                 dst_fv_ip = STLVmFlowVarRepeatableRandom(
494                     name="ip_dst",
495                     min_value=stream_cfg['ip_dst_addr'],
496                     max_value=stream_cfg['ip_dst_addr_max'],
497                     size=4,
498                     seed=random.randint(0, 32767),
499                     limit=stream_cfg['ip_dst_count'])
500             else:
501                 src_fv_ip = STLVmFlowVar(
502                     name="ip_src",
503                     min_value=stream_cfg['ip_src_addr'],
504                     max_value=stream_cfg['ip_src_addr_max'],
505                     size=4,
506                     op="inc",
507                     step=stream_cfg['ip_addrs_step'])
508                 dst_fv_ip = STLVmFlowVar(
509                     name="ip_dst",
510                     min_value=stream_cfg['ip_dst_addr'],
511                     max_value=stream_cfg['ip_dst_addr_max'],
512                     size=4,
513                     op="inc",
514                     step=stream_cfg['ip_addrs_step'])
515
516             if disable_random_latency_flow:
517                 src_fv_port = STLVmFlowVar(
518                     name="p_src",
519                     min_value=udp_args['sport'],
520                     max_value=udp_args['sport'],
521                     size=2)
522                 dst_fv_port = STLVmFlowVar(
523                     name="p_dst",
524                     min_value=udp_args['dport'],
525                     max_value=udp_args['dport'],
526                     size=2)
527             elif stream_cfg['udp_port_step'] == 'random':
528                 src_fv_port = STLVmFlowVarRepeatableRandom(
529                     name="p_src",
530                     min_value=udp_args['sport'],
531                     max_value=udp_args['sport_max'],
532                     size=2,
533                     seed=random.randint(0, 32767),
534                     limit=stream_cfg['udp_src_count'])
535                 dst_fv_port = STLVmFlowVarRepeatableRandom(
536                     name="p_dst",
537                     min_value=udp_args['dport'],
538                     max_value=udp_args['dport_max'],
539                     size=2,
540                     seed=random.randint(0, 32767),
541                     limit=stream_cfg['udp_dst_count'])
542             else:
543                 src_fv_port = STLVmFlowVar(
544                     name="p_src",
545                     min_value=udp_args['sport'],
546                     max_value=udp_args['sport_max'],
547                     size=2,
548                     op="inc",
549                     step=udp_args['sport_step'])
550                 dst_fv_port = STLVmFlowVar(
551                     name="p_dst",
552                     min_value=udp_args['dport'],
553                     max_value=udp_args['dport_max'],
554                     size=2,
555                     op="inc",
556                     step=udp_args['dport_step'])
557             vm_param = [
558                 src_fv_ip,
559                 STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
560                 src_fv_port,
561                 STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
562                 dst_fv_ip,
563                 STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
564                 dst_fv_port,
565                 STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
566             ]
567         # Use HW offload to compute the outer IP/UDP checksums
568         vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
569                                            l4_offset="UDP:0",
570                                            l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
571         # Use software to fix the inner IPv4 checksum for VxLAN packets
572         if int(encap_level):
573             vm_param.append(STLVmFixIpv4(offset="IP:1"))
574         pad = max(0, frame_size - len(pkt_base)) * 'x'
575
576         return STLPktBuilder(pkt=pkt_base / pad,
577                              vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
578
579     def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
580                          e2e=False):
581         """Create a list of streams corresponding to a given chain and stream config.
582
583         port: port where the streams originate (0 or 1)
584         chain_id: the chain to which the streams are associated
585         stream_cfg: stream configuration
586         l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
587         latency: if True also create a latency stream
588         e2e: True if performing "end to end" connectivity check
589         """
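        # Resulting stream count per (port, chain), based on the code below (sketch):
        #   fixed frame size: 1 data stream, plus 1 latency stream when latency=True
        #   IMIX:             3 data streams (one per IMIX size), plus 1 latency stream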
590         streams = []
591         pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
592         if l2frame == 'IMIX':
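            # Classic IMIX mix (see traffic_utils for the exact values used here):
            # 64/594/1518-byte L2 frames weighted 7:4:1, each sent as its own
            # continuous stream with pps set to its ratio so the relative rates hold.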
593             for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
594                 pkt = self._create_pkt(stream_cfg, l2_frame_size)
595                 if e2e or stream_cfg['mpls']:
596                     streams.append(STLStream(packet=pkt,
597                                              mode=STLTXCont(pps=ratio)))
598                 else:
599                     if stream_cfg['vxlan'] is True:
600                         streams.append(STLStream(packet=pkt,
601                                                  flow_stats=STLFlowStats(pg_id=pg_id,
602                                                                          vxlan=True)
603                                                     if not self.config.no_flow_stats else None,
604                                                  mode=STLTXCont(pps=ratio)))
605                     else:
606                         streams.append(STLStream(packet=pkt,
607                                                  flow_stats=STLFlowStats(pg_id=pg_id)
608                                                     if not self.config.no_flow_stats else None,
609                                                  mode=STLTXCont(pps=ratio)))
610
611             if latency:
612                 # for IMIX, the latency packets have the average IMIX packet size
613                 if stream_cfg['ip_addrs_step'] == 'random' or \
614                         stream_cfg['udp_port_step'] == 'random':
615                     # Force latency flow to only one flow to avoid creating flows
616                     # over requested flow count
617                     pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
618                 else:
619                     pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
620
621         else:
622             l2frame_size = int(l2frame)
623             pkt = self._create_pkt(stream_cfg, l2frame_size)
624             if e2e or stream_cfg['mpls']:
625                 streams.append(STLStream(packet=pkt,
626                                          # Flow stats is disabled for MPLS now
627                                          # flow_stats=STLFlowStats(pg_id=pg_id),
628                                          mode=STLTXCont()))
629             else:
630                 if stream_cfg['vxlan'] is True:
631                     streams.append(STLStream(packet=pkt,
632                                              flow_stats=STLFlowStats(pg_id=pg_id,
633                                                                      vxlan=True)
634                                                 if not self.config.no_flow_stats else None,
635                                              mode=STLTXCont()))
636                 else:
637                     streams.append(STLStream(packet=pkt,
638                                              flow_stats=STLFlowStats(pg_id=pg_id)
639                                                 if not self.config.no_flow_stats else None,
640                                              mode=STLTXCont()))
641             # For the latency stream the minimum payload is 16 bytes, even with VLAN tagging:
642             # without VLAN the minimum L2 frame size is 64 bytes,
643             # with VLAN it is 68 bytes.
644             # This only applies to the latency stream.
645             if latency:
646                 if stream_cfg['vlan_tag'] and l2frame_size < 68:
647                     l2frame_size = 68
648                 if stream_cfg['ip_addrs_step'] == 'random' or \
649                         stream_cfg['udp_port_step'] == 'random':
650                     # Force latency flow to only one flow to avoid creating flows
651                     # over requested flow count
652                     pkt = self._create_pkt(stream_cfg, l2frame_size, True)
653                 else:
654                     pkt = self._create_pkt(stream_cfg, l2frame_size)
655
656         if latency:
657             if self.config.no_latency_stats:
658                 LOG.info("Latency flow statistics are disabled.")
659             if stream_cfg['vxlan'] is True:
660                 streams.append(STLStream(packet=pkt,
661                                          flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
662                                                                         vxlan=True)
663                                             if not self.config.no_latency_stats else None,
664                                          mode=STLTXCont(pps=self.LATENCY_PPS)))
665             else:
666                 streams.append(STLStream(packet=pkt,
667                                          flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
668                                             if not self.config.no_latency_stats else None,
669                                          mode=STLTXCont(pps=self.LATENCY_PPS)))
670         return streams
671
672     @timeout(5)
673     def __connect(self, client):
674         client.connect()
675
676     def __local_server_status(self):
677         """The TRex server may have started but failed initializing... and stopped.
678         This code is specifically designed to address the case where
679         a fatal failure occurs on a DPDK init call.
680         The TRex algorithm should be revised to include some missing timeouts (?)
681         Status returned:
682           0: no error detected
683           1: fatal error detected - should lead to exiting the run
684           2: error detected that could be solved by starting again
685         The diagnosis is based on parsing the local TRex log file (improvable).
686         """
687         status = 0
688         message = None
689         failure = None
690         exited = None
691         cause = None
692         error = None
693         before = None
694         after = None
695         last = None
696         try:
697             with open('/tmp/trex.log', 'r') as trex_log:
698                 for _line in trex_log:
699                     line = _line.strip()
700                     if line.startswith('Usage:'):
701                         break
702                     if 'ports are bound' in line:
703                         continue
704                     if 'please wait' in line:
705                         continue
706                     if 'exit' in line.lower():
707                         exited = line
708                     elif 'cause' in line.lower():
709                         cause = line
710                     elif 'fail' in line.lower():
711                         failure = line
712                     elif 'msg' in line.lower():
713                         message = line
714                     elif (error is not None) and line:
715                         after = line
716                     elif line.startswith('Error:') or line.startswith('ERROR'):
717                         error = line
718                         before = last
719                     last = line
720         except FileNotFoundError:
721             pass
722         if exited is not None:
723             status = 1
724             LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
725             if cause is not None:
726                 LOG.info("TRex [cont'd] %s", cause)
727             if failure is not None:
728                 LOG.info("TRex [cont'd] %s", failure)
729             if message is not None:
730                 LOG.info("TRex [cont'd] %s", message)
731                 if 'not supported yet' in message.lower():
732                     LOG.info("TRex [cont'd] Try starting again!")
733                     status = 2
734         elif error is not None:
735             status = 1
736             LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
737             if after is not None:
738                 LOG.info("TRex [cont'd] %s", after)
739             elif before is not None:
740                 LOG.info("TRex [cont'd] %s", before)
741         return status
742
743     def __connect_after_start(self):
744         # after start, Trex may take a bit of time to initialize
745         # so we need to retry a few times
746         # we try to capture recoverable error cases (checking status)
747         status = 0
748         for it in range(self.config.generic_retry_count):
749             try:
750                 time.sleep(1)
751                 self.client.connect()
752                 break
753             except Exception as ex:
754                 if it == (self.config.generic_retry_count - 1):
755                     raise
756                 status = self.__local_server_status()
757                 if status > 0:
758                     # No need to wait anymore, something went wrong and TRex exited
759                     if status == 1:
760                         LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
761                         print("More information? Try the command: "
762                             + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
763                         sys.exit(0)
764                     if status == 2:
765                         # a new start will follow
766                         return status
767                 LOG.info("Retrying connection to TRex (%s)...", ex.msg)
768         return status
769
770     def connect(self):
771         """Connect to the TRex server."""
772         status = 0
773         server_ip = self.generator_config.ip
774         LOG.info("Connecting to TRex (%s)...", server_ip)
775
776         # Connect to TRex server
777         self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
778                                 async_port=self.generator_config.zmq_pub_port)
779         try:
780             self.__connect(self.client)
781             if server_ip == '127.0.0.1':
782                 config_updated = self.__check_config()
783                 if config_updated or self.config.restart:
784                     status = self.__restart()
785         except (TimeoutError, STLError) as e:
786             if server_ip == '127.0.0.1':
787                 status = self.__start_local_server()
788             else:
789                 raise TrafficGeneratorException(e.message) from e
790
791         if status == 2:
792             # Workaround in case of a failed TRex server initialization
793             # we try to start it again (twice maximum)
794             # which may allow low level initialization to complete.
795             if self.__start_local_server() == 2:
796                 self.__start_local_server()
797
798         ports = list(self.generator_config.ports)
799         self.port_handle = ports
800         # Prepare the ports
801         self.client.reset(ports)
802         # Read HW information from each port
803         # this returns an array of dict (1 per port)
804         """
805         Example of output for Intel XL710
806         [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
807           'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
808           u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
809           u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
810           u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
811           'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
812           u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
813           'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
814           'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
815           'layer_mode': 'Ethernet', u'numa': 0}, ...]
816         """
817         self.port_info = self.client.get_port_info(ports)
818         LOG.info('Connected to TRex')
819         for id, port in enumerate(self.port_info):
820             LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
821                      id, port['description'], port['speed'], port['src_mac'],
822                      port['pci_addr'], port['driver'])
823         # Make sure the 2 ports have the same speed
824         if self.port_info[0]['speed'] != self.port_info[1]['speed']:
825             raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
826                                             (self.port_info[0]['speed'],
827                                              self.port_info[1]['speed']))
828
829     def __start_local_server(self):
830         try:
831             LOG.info("Starting TRex ...")
832             self.__start_server()
833             status = self.__connect_after_start()
834         except (TimeoutError, STLError) as e:
835             LOG.error('Cannot connect to TRex')
836             LOG.error(traceback.format_exc())
837             logpath = '/tmp/trex.log'
838             if os.path.isfile(logpath):
839                 # Wait for TRex to finish writing error message
840                 last_size = 0
841                 for _ in range(self.config.generic_retry_count):
842                     size = os.path.getsize(logpath)
843                     if size == last_size:
844                         # probably not writing anymore
845                         break
846                     last_size = size
847                     time.sleep(1)
848                 with open(logpath, 'r') as f:
849                     message = f.read()
850             else:
851                 message = e.message
852             raise TrafficGeneratorException(message) from e
853         return status
854
855     def __start_server(self):
856         server = TRexTrafficServer()
857         server.run_server(self.generator_config)
858
859     def __check_config(self):
860         server = TRexTrafficServer()
861         return server.check_config_updated(self.generator_config)
862
863     def __restart(self):
864         LOG.info("Restarting TRex ...")
865         self.__stop_server()
866         # Wait for server stopped
867         for _ in range(self.config.generic_retry_count):
868             time.sleep(1)
869             if not self.client.is_connected():
870                 LOG.info("TRex is stopped...")
871                 break
872         # Start and report a possible failure
873         return self.__start_local_server()
874
875     def __stop_server(self):
876         if self.generator_config.ip == '127.0.0.1':
877             ports = self.client.get_acquired_ports()
878             LOG.info('Releasing ports %s and stopping TRex...', ports)
879             try:
880                 if ports:
881                     self.client.release(ports=ports)
882                 self.client.server_shutdown()
883             except STLError as e:
884                 LOG.warning('Unable to stop TRex. Error: %s', e)
885         else:
886             LOG.info('Using remote TRex. Unable to stop TRex')
887
888     def resolve_arp(self):
889         """Resolve all configured remote IP addresses.
890
891         return: None if not all of the configured IP addresses could be resolved,
892                 else a dict of lists of dest MACs indexed by port#
893                 (the dest MACs in each list are indexed by chain id)
894         """
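        # Shape of a successful return value (sketch, values are placeholders):
        #   {0: [dst_mac_chain0, dst_mac_chain1, ...],
        #    1: [dst_mac_chain0, dst_mac_chain1, ...]}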
895         self.client.set_service_mode(ports=self.port_handle)
896         LOG.info('Polling ARP until successful...')
897         arp_dest_macs = {}
898         for port, device in zip(self.port_handle, self.generator_config.devices):
899             # there should be 1 stream config per chain
900             stream_configs = device.get_stream_configs()
901             chain_count = len(stream_configs)
902             ctx = self.client.create_service_ctx(port=port)
903             # all dest macs on this port indexed by chain ID
904             dst_macs = [None] * chain_count
905             dst_macs_count = 0
906             # the index in the list is the chain id
907             if self.config.vxlan or self.config.mpls:
908                 arps = [
909                     ServiceARP(ctx,
910                                src_ip=device.vtep_src_ip,
911                                dst_ip=device.vtep_dst_ip,
912                                vlan=device.vtep_vlan)
913                     for cfg in stream_configs
914                 ]
915             else:
916                 arps = [
917                     ServiceARP(ctx,
918                                src_ip=cfg['ip_src_tg_gw'],
919                                dst_ip=cfg['mac_discovery_gw'],
920                                # will be None if no vlan tagging
921                                vlan=cfg['vlan_tag'])
922                     for cfg in stream_configs
923                 ]
924
925             for attempt in range(self.config.generic_retry_count):
926                 try:
927                     ctx.run(arps)
928                 except STLError:
929                     LOG.error(traceback.format_exc())
930                     continue
931
932                 unresolved = []
933                 for chain_id, mac in enumerate(dst_macs):
934                     if not mac:
935                         arp_record = arps[chain_id].get_record()
936                         if arp_record.dst_mac:
937                             dst_macs[chain_id] = arp_record.dst_mac
938                             dst_macs_count += 1
939                             LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
940                                      port, chain_id,
941                                      arp_record.src_ip,
942                                      arp_record.dst_ip, arp_record.dst_mac)
943                         else:
944                             unresolved.append(arp_record.dst_ip)
945                 if dst_macs_count == chain_count:
946                     arp_dest_macs[port] = dst_macs
947                     LOG.info('ARP resolved successfully for port %s', port)
948                     break
949
950                 retry = attempt + 1
951                 LOG.info('Retrying ARP for: %s (retry %d/%d)',
952                          unresolved, retry, self.config.generic_retry_count)
953                 if retry < self.config.generic_retry_count:
954                     time.sleep(self.config.generic_poll_sec)
955             else:
956                 LOG.error('ARP timed out for port %s (resolved %d out of %d)',
957                           port,
958                           dst_macs_count,
959                           chain_count)
960                 break
961
962         # A traffic capture may have been started (from a T-Rex console) at this time.
963         # If so requested, we keep the service mode enabled here, and disable it otherwise.
964         #  | Disabling the service mode while a capture is in progress
965         #  | would cause the application to stop/crash with an error.
966         if not self.config.service_mode:
967             self.client.set_service_mode(ports=self.port_handle, enabled=False)
968         if len(arp_dest_macs) == len(self.port_handle):
969             return arp_dest_macs
970         return None
971
972     def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
973         """Check if the rate provided by the user meets the minimum required rate. Applies only if latency is True."""
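        # Worked example (illustrative): with LATENCY_PPS=1000, 2 chains and bidirectional
        # traffic (mult=2), the aggregate requested rate must be at least
        # (1000 + 1) * 2 * 2 = 4004 pps, so that each latency stream can run at LATENCY_PPS
        # and every data stream still gets at least 1 pps.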
974         intf_speed = self.generator_config.intf_speed
975         if latency:
976             if bidirectional:
977                 mult = 2
978                 total_rate = 0
979                 for rate in rates:
980                     r = utils.convert_rates(l2frame_size, rate, intf_speed)
981                     total_rate += int(r['rate_pps'])
982             else:
983                 mult = 1
984                 r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
985                 total_rate = int(r['rate_pps'])
986             # rate must be enough for latency stream and at least 1 pps for base stream per chain
987             required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
988             result = utils.convert_rates(l2frame_size,
989                                          {'rate_pps': required_rate},
990                                          intf_speed * mult)
991             result['result'] = total_rate >= required_rate
992             return result
993
994         return {'result': True}
995
996     def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
997         """Program all the streams in Trex server.
998
999         l2frame_size: L2 frame size or IMIX
1000         rates: a list of 2 rates to run each direction
1001                each rate is a dict like {'rate_pps': '10kpps'}
1002         bidirectional: True if bidirectional
1003         latency: True if latency measurement is needed
1004         e2e: True if performing "end to end" connectivity check
1005         """
1006         if self.config.no_flow_stats:
1007             LOG.info("Traffic flow statistics are disabled.")
1008         r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
1009         if not r['result']:
1010             raise TrafficGeneratorException(
1011                 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
1012                 .format(pps=r['rate_pps'],
1013                         bps=r['rate_bps'],
1014                         load=r['rate_percent']))
1015         self.l2_frame_size = l2frame_size
1016         # a dict of list of streams indexed by port#
1017         # in case of fixed size, has self.chain_count * 2 * 2 streams
1018         # (1 normal + 1 latency stream per direction per chain)
1019         # for IMIX, has self.chain_count * 2 * 4 streams
1020         # (3 normal + 1 latency stream per direction per chain)
1021         streamblock = {}
1022         for port in self.port_handle:
1023             streamblock[port] = []
1024         stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
1025         if self.generator_config.ip_addrs_step == 'random' \
1026                 or self.generator_config.gen_config.udp_port_step == 'random':
1027             LOG.warning("Using random step, the number of flows can be less than "
1028                         "the requested number of flows because the repeatable multivariate "
1029                         "random generation can reproduce the same pattern of values")
1030         self.rates = [utils.to_rate_str(rate) for rate in rates]
1031         for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
1032             streamblock[0].extend(self.generate_streams(self.port_handle[0],
1033                                                         chain_id,
1034                                                         fwd_stream_cfg,
1035                                                         l2frame_size,
1036                                                         latency=latency,
1037                                                         e2e=e2e))
1038             if len(self.rates) > 1:
1039                 streamblock[1].extend(self.generate_streams(self.port_handle[1],
1040                                                             chain_id,
1041                                                             rev_stream_cfg,
1042                                                             l2frame_size,
1043                                                             latency=bidirectional and latency,
1044                                                             e2e=e2e))
1045
1046         for port in self.port_handle:
1047             if self.config.vxlan:
1048                 self.client.set_port_attr(ports=port, vxlan_fs=[4789])
1049             else:
1050                 self.client.set_port_attr(ports=port, vxlan_fs=None)
1051             self.client.add_streams(streamblock[port], ports=port)
1052             LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
1053
1054     def clear_streamblock(self):
1055         """Clear all streams from TRex."""
1056         self.rates = []
1057         self.client.reset(self.port_handle)
1058         LOG.info('Cleared all existing streams')
1059
1060     def get_stats(self, if_stats=None):
1061         """Get stats from Trex."""
1062         stats = self.client.get_stats()
1063         return self.extract_stats(stats, if_stats)
1064
1065     def get_macs(self):
1066         """Return the Trex local port MAC addresses.
1067
1068         return: a list of MAC addresses indexed by the port#
1069         """
1070         return [port['src_mac'] for port in self.port_info]
1071
1072     def get_port_speed_gbps(self):
1073         """Return the Trex local port speeds.
1074
1075         return: a list of speed in Gbps indexed by the port#
1076         """
1077         return [port['speed'] for port in self.port_info]
1078
1079     def clear_stats(self):
1080         """Clear all stats in the traffic generator."""
1081         if self.port_handle:
1082             self.client.clear_stats()
1083
1084     def start_traffic(self):
1085         """Start generating traffic in all ports."""
1086         for port, rate in zip(self.port_handle, self.rates):
1087             self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
1088
1089     def stop_traffic(self):
1090         """Stop generating traffic."""
1091         self.client.stop(ports=self.port_handle)
1092
1093     def start_capture(self):
1094         """Capture all packets on both ports that are unicast to us."""
1095         if self.capture_id:
1096             self.stop_capture()
1097         # Need to filter out unwanted packets so we do not end up counting
1098         # src MACs of frames that are not unicast to us
1099         src_mac_list = self.get_macs()
1100         bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
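        # e.g. (MAC addresses illustrative only):
        #   "ether dst 3c:fd:fe:a8:24:48 or ether dst 3c:fd:fe:a8:24:49"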
1101         # ports must be in service mode in order to enable capture
1102         self.client.set_service_mode(ports=self.port_handle)
1103         self.capture_id = self.client.start_capture(
1104             rx_ports=self.port_handle, bpf_filter=bpf_filter)
1105
1106     def fetch_capture_packets(self):
1107         """Fetch capture packets in capture mode."""
1108         if self.capture_id:
1109             self.packet_list = []
1110             self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
1111                                               output=self.packet_list)
1112
1113     def stop_capture(self):
1114         """Stop capturing packets."""
1115         if self.capture_id:
1116             self.client.stop_capture(capture_id=self.capture_id['id'])
1117             self.capture_id = None
1118             # A traffic capture may have been started (from a T-Rex console) at this time.
1119             # If so requested, we keep the service mode enabled here, and disable it otherwise.
1120             #  | Disabling the service mode while a capture is in progress
1121             #  | would cause the application to stop/crash with an error.
1122             if not self.config.service_mode:
1123                 self.client.set_service_mode(ports=self.port_handle, enabled=False)
1124
1125     def cleanup(self):
1126         """Cleanup Trex driver."""
1127         if self.client:
1128             try:
1129                 self.client.reset(self.port_handle)
1130                 self.client.disconnect()
1131             except STLError:
1132                 # TRex does not like a reset while in disconnected state
1133                 pass
1134
1135     def set_service_mode(self, enabled=True):
1136         """Enable/disable the 'service' mode."""
1137         self.client.set_service_mode(ports=self.port_handle, enabled=enabled)