NFVBENCH-192: Complete/fix hdrh-related processing to consider all cases
[nfvbench.git] nfvbench/traffic_gen/trex_gen.py
# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Driver module for TRex traffic generator."""

import math
import os
import sys
import random
import time
import traceback
from functools import reduce

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

from hdrh.histogram import HdrHistogram

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFixIpv4
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmTupleGen
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS

class VXLAN(Packet):
    """VxLAN class."""

    _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
    name = "VXLAN"
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]
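    # Added note (not in the original source): this appears to mirror the 8-byte
    # VXLAN header from RFC 7348: a 32-bit flags/reserved word where only the
    # I bit (0x08000000) is set by default, a 24-bit VNI and a final reserved
    # byte. The layer is bound to UDP destination port 4789 in _bind_vxlan().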

    def mysummary(self):
        """Summary."""
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")

class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    LATENCY_PPS = 1000
    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        """Trex driver."""
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.client = None
        self.id = count()
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        self.rates = []
        self.capture_id = None
        self.packet_list = []
        self.l2_frame_size = 0

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
        """
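        # Worked example (added for illustration, derived from the masks above):
        # port=1, chain_id=3 -> pg_id = 0x0080 | 3 = 131 (0x83) and
        # lat_pg_id = 131 | 0x0100 = 387 (0x183). The keys 256/257/384/385 seen
        # in the latency stats example further below are the lat_pg_ids of
        # chains 0 and 1 on ports 0 and 1.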
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK

    def extract_stats(self, in_stats, ifstats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by the TRex API
        :param ifstats: list of per-chain interface stats (or None), used to
                        select which latency histograms to merge
        """
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        # actual offered tx rate in bps
        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
        total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
        result['offered_tx_rate_bps'] = total_tx_bps

        result.update(self.get_theoretical_rates(avg_packet_size))

        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]

        # Merge HDRHistogram to have an overall value for all chains and ports
        # (provided that the histogram exists in the stats returned by T-Rex)
        # Of course, empty histograms will produce an empty (invalid) histogram.
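        # Added note (not in the original source): each 'hdrh' entry is an
        # HdrHistogram serialized as a compressed base64 string. The merge below
        # decodes every per-stream histogram, accumulates them with
        # HdrHistogram.add(), then re-encodes the aggregate so that
        # result["overall_hdrh"] has the same string format as the per-stream
        # values. A missing 'hdrh' key simply skips the whole merge (KeyError).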
        try:
            hdrh_list = []
            if ifstats:
                for chain_id, _ in enumerate(ifstats):
                    for ph in self.port_handle:
                        _, lat_pg_id = self.get_pg_id(ph, chain_id)
                        hdrh_list.append(
                            HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
            else:
                for pg_id in in_stats['latency']:
                    if pg_id != 'global':
                        hdrh_list.append(
                            HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))

            def add_hdrh(x, y):
                x.add(y)
                return x
            decoded_hdrh = reduce(add_hdrh, hdrh_list)
            result["overall_hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
        except KeyError:
            pass

        return result

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flow stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                             'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'rx_bytes': {0: nan, 1: nan, 'total': nan},
                             'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                             'rx_pps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                             'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                             'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
        def get_latency(lval):
            try:
                return int(round(lval))
            except ValueError:
                return 0

        for ifs in if_stats:
            ifs.tx = ifs.rx = 0
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
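            # Added note (not in the original source): apply the transposition
            # described in the docstring, for both the normal and the latency
            # stream of this chain: tx for this port is read from this port's
            # pg_id entry, while rx for the far end port is read from
            # 'rx_pkts'[1 - port] of the same entry.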
            for pid in [pg_id, lat_pg_id]:
                try:
                    pg_stats = trex_stats['flow_stats'][pid]
                    if_stats[port].tx += pg_stats['tx_pkts'][port]
                    if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
                except KeyError:
                    pass
            try:
                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                else:
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
            except KeyError:
                pass

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine the chosen latency stats.

        Example of latency dict returned by TRex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 26.5,
                                      'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                      'histogram': {20: 303,
                                                    30: 320,
                                                    40: 300,
                                                    50: 73,
                                                    60: 4,
                                                    70: 1},
                                      'jitter': 14,
                                      'last_max': 63,
                                      'total_max': 63,
                                      'total_min': 20}},
                    257: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 29.75,
                                      'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                      'histogram': {20: 261,
                                                    30: 431,
                                                    40: 3,
                                                    50: 80,
                                                    60: 225},
                                      'jitter': 23,
                                      'last_max': 67,
                                      'total_max': 67,
                                      'total_min': 20}},
                    384: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 18.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                      'histogram': {20: 987, 30: 14},
                                      'jitter': 0,
                                      'last_max': 34,
                                      'total_max': 34,
                                      'total_min': 20}},
                    385: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 19.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                      'histogram': {20: 989, 30: 11},
                                      'jitter': 0,
                                      'last_max': 38,
                                      'total_max': 38,
                                      'total_min': 20}},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
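        # Added note (not in the original source): the keys 256/257/384/385 in
        # the example above are lat_pg_ids as computed by get_pg_id(): chains 0
        # and 1 on port 0 map to 256 and 257, chains 0 and 1 on port 1 map to
        # 384 and 385 (0x100 latency bit, 0x80 port bit, low bits = chain id).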
        total_max = 0
        average = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            try:
                _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
                lat = in_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                total_max = max(lat['total_max'], total_max)
                total_min = min(lat['total_min'], total_min)
                average += lat['average']
            except KeyError:
                pass
        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
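        # Added example (not in the original source): for l2frame_size=64, the
        # Scapy packet built here is 60 bytes long and TRex appends the 4-byte
        # FCS on the wire, yielding the requested 64-byte L2 frame.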
        vm_param = []
        if stream_cfg['vxlan'] is True:
            self._bind_vxlan()
            encap_level = '1'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
                min_value=1337,
                max_value=32767,
                size=2,
                op="random")
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            #  Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        udp_args = {}
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = 1
            else:
                step = stream_cfg['udp_port_step']
            udp_args['sport_step'] = int(step)
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = 1
            else:
                step = stream_cfg['udp_port_step']
            udp_args['dport_step'] = int(step)
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])

        # STLVmTupleGen needs a flow count >= the number of cores used by TRex;
        # if the flow count is lower, we use STLVmFlowVar instead
        if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
                stream_cfg['count'] >= self.generator_config.cores:
            src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
                                   ip_max=stream_cfg['ip_src_addr_max'],
                                   port_min=udp_args['sport'],
                                   port_max=udp_args['sport_max'],
                                   name="tuple_src",
                                   limit_flows=stream_cfg['count'])
            dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
                                   ip_max=stream_cfg['ip_dst_addr_max'],
                                   port_min=udp_args['dport'],
                                   port_max=udp_args['dport_max'],
                                   name="tuple_dst",
                                   limit_flows=stream_cfg['count'])
            vm_param = [
                src_fv,
                STLVmWrFlowVar(fv_name="tuple_src.ip",
                               pkt_offset="IP:{}.src".format(encap_level)),
                STLVmWrFlowVar(fv_name="tuple_src.port",
                               pkt_offset="UDP:{}.sport".format(encap_level)),
                dst_fv,
                STLVmWrFlowVar(fv_name="tuple_dst.ip",
                               pkt_offset="IP:{}.dst".format(encap_level)),
                STLVmWrFlowVar(fv_name="tuple_dst.port",
                               pkt_offset="UDP:{}.dport".format(encap_level)),
            ]
        else:
            if disable_random_latency_flow:
                src_fv_ip = STLVmFlowVar(
                    name="ip_src",
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr'],
                    size=4)
                dst_fv_ip = STLVmFlowVar(
                    name="ip_dst",
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr'],
                    size=4)
            elif stream_cfg['ip_addrs_step'] == 'random':
                src_fv_ip = STLVmFlowVarRepeatableRandom(
                    name="ip_src",
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    size=4,
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_src_count'])
                dst_fv_ip = STLVmFlowVarRepeatableRandom(
                    name="ip_dst",
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    size=4,
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['ip_dst_count'])
            else:
                src_fv_ip = STLVmFlowVar(
                    name="ip_src",
                    min_value=stream_cfg['ip_src_addr'],
                    max_value=stream_cfg['ip_src_addr_max'],
                    size=4,
                    op="inc",
                    step=stream_cfg['ip_addrs_step'])
                dst_fv_ip = STLVmFlowVar(
                    name="ip_dst",
                    min_value=stream_cfg['ip_dst_addr'],
                    max_value=stream_cfg['ip_dst_addr_max'],
                    size=4,
                    op="inc",
                    step=stream_cfg['ip_addrs_step'])

            if disable_random_latency_flow:
                src_fv_port = STLVmFlowVar(
                    name="p_src",
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport'],
                    size=2)
                dst_fv_port = STLVmFlowVar(
                    name="p_dst",
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport'],
                    size=2)
            elif stream_cfg['udp_port_step'] == 'random':
                src_fv_port = STLVmFlowVarRepeatableRandom(
                    name="p_src",
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    size=2,
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_src_count'])
                dst_fv_port = STLVmFlowVarRepeatableRandom(
                    name="p_dst",
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    size=2,
                    seed=random.randint(0, 32767),
                    limit=stream_cfg['udp_dst_count'])
            else:
                src_fv_port = STLVmFlowVar(
                    name="p_src",
                    min_value=udp_args['sport'],
                    max_value=udp_args['sport_max'],
                    size=2,
                    op="inc",
                    step=udp_args['sport_step'])
                dst_fv_port = STLVmFlowVar(
                    name="p_dst",
                    min_value=udp_args['dport'],
                    max_value=udp_args['dport_max'],
                    size=2,
                    op="inc",
                    step=udp_args['dport_step'])
            vm_param = [
                src_fv_ip,
                STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
                src_fv_port,
                STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
                dst_fv_ip,
                STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
                dst_fv_port,
                STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
            ]
        # Use HW offload to fix the outer IP and UDP checksums
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_offset="UDP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IPv4 checksum for VxLAN packets
        if int(encap_level):
            vm_param.append(STLVmFixIpv4(offset="IP:1"))
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain with which the streams are associated
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
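        # Added note (not in the original source): the returned list contains,
        # per chain and per port, 3 weighted continuous streams for IMIX (one
        # per IMIX frame size) or 1 continuous stream for a fixed frame size,
        # plus 1 latency stream at LATENCY_PPS when latency is True.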
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                                         vxlan=True)
                                                    if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                    if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            if latency:
                # for IMIX, the latency packets have the average IMIX packet size
                if stream_cfg['ip_addrs_step'] == 'random' or \
                        stream_cfg['udp_port_step'] == 'random':
                    # Force latency flow to only one flow to avoid creating flows
                    # over requested flow count
                    pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
                else:
                    pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)

        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                                         mode=STLTXCont()))
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                                                     vxlan=True)
                                                if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                                if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # with vlan it is 68
            # This only applies to the latency stream
            if latency:
                if stream_cfg['vlan_tag'] and l2frame_size < 68:
                    l2frame_size = 68
                if stream_cfg['ip_addrs_step'] == 'random' or \
                        stream_cfg['udp_port_step'] == 'random':
                    # Force latency flow to only one flow to avoid creating flows
                    # over requested flow count
                    pkt = self._create_pkt(stream_cfg, l2frame_size, True)
                else:
                    pkt = self._create_pkt(stream_cfg, l2frame_size)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                                                        vxlan=True)
                                            if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                            if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    @timeout(5)
    def __connect(self, client):
        client.connect()

    def __local_server_status(self):
        """The TRex server may have started but failed initializing... and stopped.

        This piece of code is especially designed to address
        the case when a fatal failure occurs on a DPDK init call.
        The TRex algorithm should be revised to include some missing timeouts (?)
        status returned:
          0: no error detected
          1: fatal error detected - should lead to exiting the run
          2: error detected that could be solved by starting again
        The diagnostic is based on parsing the local trex log file (improvable)
        """
        status = 0
        message = None
        failure = None
        exited = None
        cause = None
        error = None
        before = None
        after = None
        last = None
        try:
            with open('/tmp/trex.log', 'r') as trex_log:
                for _line in trex_log:
                    line = _line.strip()
                    if line.startswith('Usage:'):
                        break
                    if 'ports are bound' in line:
                        continue
                    if 'please wait' in line:
                        continue
                    if 'exit' in line.lower():
                        exited = line
                    elif 'cause' in line.lower():
                        cause = line
                    elif 'fail' in line.lower():
                        failure = line
                    elif 'msg' in line.lower():
                        message = line
                    elif (error is not None) and line:
                        after = line
                    elif line.startswith('Error:') or line.startswith('ERROR'):
                        error = line
                        before = last
                    last = line
        except FileNotFoundError:
            pass
        if exited is not None:
            status = 1
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
            if cause is not None:
                LOG.info("TRex [cont'd] %s", cause)
            if failure is not None:
                LOG.info("TRex [cont'd] %s", failure)
            if message is not None:
                LOG.info("TRex [cont'd] %s", message)
                if 'not supported yet' in message.lower():
                    LOG.info("TRex [cont'd] Try starting again!")
                    status = 2
        elif error is not None:
            status = 1
            LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
            if after is not None:
                LOG.info("TRex [cont'd] %s", after)
            elif before is not None:
                LOG.info("TRex [cont'd] %s", before)
        return status

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        # we try to capture recoverable error cases (checking status)
        status = 0
        for it in range(self.config.generic_retry_count):
            try:
                time.sleep(1)
                self.client.connect()
                break
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                    raise
                status = self.__local_server_status()
                if status > 0:
                    # No need to wait anymore, something went wrong and TRex exited
                    if status == 1:
                        LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
                        print("More information? Try the command: "
                              + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
                        sys.exit(0)
                    if status == 2:
                        # a new start will follow
                        return status
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)
        return status

    def connect(self):
        """Connect to the TRex server."""
        status = 0
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    status = self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                status = self.__start_local_server()
            else:
                raise TrafficGeneratorException(e.message) from e

        if status == 2:
            # Workaround in case of a failed TRex server initialization
            # we try to start it again (twice maximum)
            # which may allow low level initialization to complete.
            if self.__start_local_server() == 2:
                self.__start_local_server()

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        # Prepare the ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            status = self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                last_size = 0
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        break
                    last_size = size
                    time.sleep(1)
                with open(logpath, 'r') as f:
                    message = f.read()
            else:
                message = e.message
            raise TrafficGeneratorException(message) from e
        return status

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            time.sleep(1)
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        # Start and report a possible failure
        return self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                if ports:
                    self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of list of dest macs indexed by port#
                the dest macs in the list are indexed by the chain id
        """
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        arp_dest_macs = {}
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            dst_macs_count = 0
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                arps = [
                    ServiceARP(ctx,
                               src_ip=device.vtep_src_ip,
                               dst_ip=device.vtep_dst_ip,
                               vlan=device.vtep_vlan)
                    for cfg in stream_configs
                ]
            else:
                arps = [
                    ServiceARP(ctx,
                               src_ip=cfg['ip_src_tg_gw'],
                               dst_ip=cfg['mac_discovery_gw'],
                               # will be None if no vlan tagging
                               vlan=cfg['vlan_tag'])
                    for cfg in stream_configs
                ]

            for attempt in range(self.config.generic_retry_count):
                try:
                    ctx.run(arps)
                except STLError:
                    LOG.error(traceback.format_exc())
                    continue

                unresolved = []
                for chain_id, mac in enumerate(dst_macs):
                    if not mac:
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            dst_macs_count += 1
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     port, chain_id,
                                     arp_record.src_ip,
                                     arp_record.dst_ip, arp_record.dst_mac)
                        else:
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    break

                retry = attempt + 1
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
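            # Added note (not in the original source): this 'else' belongs to
            # the 'for attempt in range(...)' loop above; it only runs when the
            # loop completed without break, i.e. ARP never fully resolved for
            # this port within generic_retry_count attempts.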
            else:
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          port,
                          dst_macs_count,
                          chain_count)
                break

        # A traffic capture may have been started (from a T-Rex console) at this time.
        # If asked so, we keep the service mode enabled here, and disable it otherwise.
        #  | Disabling the service mode while a capture is in progress
        #  | would cause the application to stop/crash with an error.
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
        return None

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
        intf_speed = self.generator_config.intf_speed
        if latency:
            if bidirectional:
                mult = 2
                total_rate = 0
                for rate in rates:
                    r = utils.convert_rates(l2frame_size, rate, intf_speed)
                    total_rate += int(r['rate_pps'])
            else:
                mult = 1
                r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
                total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
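            # Added example (not in the original source): with the default
            # LATENCY_PPS of 1000, 2 chains and bidirectional traffic, the
            # required minimum is (1000 + 1) * 2 * 2 = 4004 pps in total.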
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
                                         intf_speed * mult)
            result['result'] = total_rate >= required_rate
            return result

        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        self.l2_frame_size = l2frame_size
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
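        # Added example (not in the original source): with 2 chains and a fixed
        # frame size, 2 * 2 * 2 = 8 streams are programmed in total (4 per
        # port); with IMIX, 2 * 2 * 4 = 16 streams.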
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        if self.generator_config.ip_addrs_step == 'random' \
                or self.generator_config.gen_config.udp_port_step == 'random':
            LOG.warning("Using random step, the number of flows can be less than "
                        "the requested number of flows due to repeatable multivariate random "
                        "generation which can reproduce the same pattern of values")
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                        chain_id,
                                                        fwd_stream_cfg,
                                                        l2frame_size,
                                                        latency=latency,
                                                        e2e=e2e))
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            chain_id,
                                                            rev_stream_cfg,
                                                            l2frame_size,
                                                            latency=bidirectional and latency,
                                                            e2e=e2e))

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.rates = []
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self, if_stats=None):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats, if_stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speeds in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        if self.capture_id:
            self.stop_capture()
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture \
            (rx_ports=self.port_handle, bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        if self.capture_id:
            self.packet_list = []
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        if self.capture_id:
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # A traffic capture may have been started (from a T-Rex console) at this time.
            # If asked so, we keep the service mode enabled here, and disable it otherwise.
            #  | Disabling the service mode while a capture is in progress
            #  | would cause the application to stop/crash with an error.
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service' mode."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)