NFVBENCH-189: Add a fix to work around the i40e_VF port initialization issue
[nfvbench.git] / nfvbench/traffic_gen/trex_gen.py
1 # Copyright 2016 Cisco Systems, Inc.  All rights reserved.
2 #
3 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
4 #    not use this file except in compliance with the License. You may obtain
5 #    a copy of the License at
6 #
7 #         http://www.apache.org/licenses/LICENSE-2.0
8 #
9 #    Unless required by applicable law or agreed to in writing, software
10 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 #    License for the specific language governing permissions and limitations
13 #    under the License.
14 """Driver module for TRex traffic generator."""
15
16 import math
17 import os
18 import sys
19 import random
20 import time
21 import traceback
22 from functools import reduce
23
24 from itertools import count
25 # pylint: disable=import-error
26 from scapy.contrib.mpls import MPLS  # flake8: noqa
27 # pylint: enable=import-error
28 from nfvbench.log import LOG
29 from nfvbench.traffic_server import TRexTrafficServer
30 from nfvbench.utils import cast_integer
31 from nfvbench.utils import timeout
32 from nfvbench.utils import TimeoutError
33
34 from hdrh.histogram import HdrHistogram
35
36 # pylint: disable=import-error
37 from trex.common.services.trex_service_arp import ServiceARP
38 from trex.stl.api import bind_layers
39 from trex.stl.api import CTRexVmInsFixHwCs
40 from trex.stl.api import Dot1Q
41 from trex.stl.api import Ether
42 from trex.stl.api import FlagsField
43 from trex.stl.api import IP
44 from trex.stl.api import Packet
45 from trex.stl.api import STLClient
46 from trex.stl.api import STLError
47 from trex.stl.api import STLFlowLatencyStats
48 from trex.stl.api import STLFlowStats
49 from trex.stl.api import STLPktBuilder
50 from trex.stl.api import STLScVmRaw
51 from trex.stl.api import STLStream
52 from trex.stl.api import STLTXCont
53 from trex.stl.api import STLVmFixChecksumHw
54 from trex.stl.api import STLVmFixIpv4
55 from trex.stl.api import STLVmFlowVar
56 from trex.stl.api import STLVmFlowVarRepeatableRandom
57 from trex.stl.api import STLVmTupleGen
58 from trex.stl.api import STLVmWrFlowVar
59 from trex.stl.api import ThreeBytesField
60 from trex.stl.api import UDP
61 from trex.stl.api import XByteField
62
63 # pylint: enable=import-error
64
65 from .traffic_base import AbstractTrafficGenerator
66 from .traffic_base import TrafficGeneratorException
67 from . import traffic_utils as utils
68 from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
69 from .traffic_utils import IMIX_L2_SIZES
70 from .traffic_utils import IMIX_RATIOS
71
72 class VXLAN(Packet):
73     """VxLAN class."""
74
75     _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
76     name = "VXLAN"
77     fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
78                    ThreeBytesField("vni", 0),
79                    XByteField("reserved", 0x00)]
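    # Note: the fields above model the standard 8-byte VXLAN header:
    # 4 bytes of flags (only the 'I' bit set by default), a 3-byte VNI and 1 reserved byte.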
80
81     def mysummary(self):
82         """Summary."""
83         return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
84
85 class TRex(AbstractTrafficGenerator):
86     """TRex traffic generator driver."""
87
88     LATENCY_PPS = 1000
89     CHAIN_PG_ID_MASK = 0x007F
90     PORT_PG_ID_MASK = 0x0080
91     LATENCY_PG_ID_MASK = 0x0100
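    # pg_id bit layout (see get_pg_id below): | latency bit | port bit | 7-bit chain id |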
92
93     def __init__(self, traffic_client):
94         """Trex driver."""
95         AbstractTrafficGenerator.__init__(self, traffic_client)
96         self.client = None
97         self.id = count()
98         self.port_handle = []
99         self.chain_count = self.generator_config.service_chain_count
100         self.rates = []
101         self.capture_id = None
102         self.packet_list = []
103         self.l2_frame_size = 0
104
105     def get_version(self):
106         """Get the Trex version."""
107         return self.client.get_server_version() if self.client else ''
108
109     def get_pg_id(self, port, chain_id):
110         """Calculate the packet group IDs to use for a given port/stream type/chain_id.
111
112         port: 0 or 1
113         chain_id: identifies to which chain the pg_id is associated (0 to 127)
114         return: pg_id, lat_pg_id
115
116         We use a bit mask to set up the 3 fields:
117         0x007F: chain ID (7 bits, for a maximum of 128 chains)
118         0x0080: port bit
119         0x0100: latency bit
120         """
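        # Example (illustrative values): port=1, chain_id=5
        #   pg_id     = 1 * 0x0080 | 5     = 0x0085 (133)
        #   lat_pg_id = 0x0085 | 0x0100    = 0x0185 (389)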
121         pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
122         return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
123
124     def extract_stats(self, in_stats, ifstats):
125         """Extract stats from dict returned by Trex API.
126
127         :param in_stats: dict as returned by TRex api
128         """
129         utils.nan_replace(in_stats)
130         # LOG.debug(in_stats)
131
132         result = {}
133         # self.port_handle should have only 2 elements: [0, 1]
134         # so (1 - ph) is the index of the far end port
135         for ph in self.port_handle:
136             stats = in_stats[ph]
137             far_end_stats = in_stats[1 - ph]
138             result[ph] = {
139                 'tx': {
140                     'total_pkts': cast_integer(stats['opackets']),
141                     'total_pkt_bytes': cast_integer(stats['obytes']),
142                     'pkt_rate': cast_integer(stats['tx_pps']),
143                     'pkt_bit_rate': cast_integer(stats['tx_bps'])
144                 },
145                 'rx': {
146                     'total_pkts': cast_integer(stats['ipackets']),
147                     'total_pkt_bytes': cast_integer(stats['ibytes']),
148                     'pkt_rate': cast_integer(stats['rx_pps']),
149                     'pkt_bit_rate': cast_integer(stats['rx_bps']),
150                     # how many pkts were dropped in RX direction
151                     # need to take the tx counter on the far end port
152                     'dropped_pkts': cast_integer(
153                         far_end_stats['opackets'] - stats['ipackets'])
154                 }
155             }
156             self.__combine_latencies(in_stats, result[ph]['rx'], ph)
157
158         total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
159         result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
160         # actual offered tx rate in bps
161         avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
162         total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
163         result['offered_tx_rate_bps'] = total_tx_bps
164
165         result.update(self.get_theoretical_rates(avg_packet_size))
166
167         result["flow_stats"] = in_stats["flow_stats"]
168         result["latency"] = in_stats["latency"]
169
170         # Merge the HdrHistograms to obtain an overall value across all chains and ports
171         try:
172             hdrh_list = []
173             if ifstats:
174                 for chain_id, _ in enumerate(ifstats):
175                     for ph in self.port_handle:
176                         _, lat_pg_id = self.get_pg_id(ph, chain_id)
177                         hdrh_list.append(
178                             HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
179             else:
180                 for pg_id in in_stats['latency']:
181                     if pg_id != 'global':
182                         hdrh_list.append(
183                             HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
184
185             def add_hdrh(x, y):
186                 x.add(y)
187                 return x
188             decoded_hdrh = reduce(add_hdrh, hdrh_list)
189             result["hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
190         except KeyError:
191             pass
192
193         return result
194
195     def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
196         """Extract the aggregated stats for a given chain.
197
198         trex_stats: stats as returned by get_stats()
199         if_stats: a list of 2 interface stats to update (port 0 and 1)
200         latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
201                    latencies[p] is the latency for packets sent on port p
202                    if there are no latency streams, the Latency instances are not modified
203         chain_idx: chain index of the interface stats
204
205         The packet counts include normal and latency streams.
206
207         Trex returns flows stats as follows:
208
209         'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
210                    'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
211                    'rx_bytes': {0: nan, 1: nan, 'total': nan},
212                    'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
213                    'rx_pps': {0: 0, 1: 0, 'total': 0},
214                    'tx_bps': {0: 0, 1: 0, 'total': 0},
215                    'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
216                    'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
217                    'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
218                    'tx_pps': {0: 0, 1: 0, 'total': 0}},
219                1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
220                    'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
221                    'rx_bytes': {0: nan, 1: nan, 'total': nan},
222                    'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
223                    'rx_pps': {0: 0, 1: 0, 'total': 0},
224                    'tx_bps': {0: 0, 1: 0, 'total': 0},
225                    'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
226                    'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
227                    'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
228                    'tx_pps': {0: 0, 1: 0, 'total': 0}},
229                 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
230                 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
231                 'rx_bytes': {0: nan, 1: nan, 'total': nan},
232                 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
233                 'rx_pps': {0: 0, 1: 0, 'total': 0},
234                 'tx_bps': {0: 0, 1: 0, 'total': 0},
235                 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
236                 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
237                 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
238                 'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...
239
240         the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
241         get_pg_id() method.
242         packet counters for a given stream sent on port p are reported as:
243         - tx_pkts[p] on port p
244         - rx_pkts[1-p] on the far end port
245
246         This is a tricky/critical counter transposition operation because
247         the results are grouped by port (not by stream):
248         tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
249         rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
250         tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
251         rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
252
253         or using a more generic formula:
254         tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
255         rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
256
257         the second formula is equivalent to
258         rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
259
260         If there are latency streams, those same counters need to be added in the same way
261         """
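        # For illustration, with the flow_stats example above (chain 0, pg_ids 0 and 128):
        #   if_stats[0].tx += flow_stats[0]['tx_pkts'][0]   (15001)
        #   if_stats[1].rx += flow_stats[0]['rx_pkts'][1]   (15001)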
262         def get_latency(lval):
263             try:
264                 return int(round(lval))
265             except ValueError:
266                 return 0
267
268         for ifs in if_stats:
269             ifs.tx = ifs.rx = 0
270         for port in range(2):
271             pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
272             for pid in [pg_id, lat_pg_id]:
273                 try:
274                     pg_stats = trex_stats['flow_stats'][pid]
275                     if_stats[port].tx += pg_stats['tx_pkts'][port]
276                     if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
277                 except KeyError:
278                     pass
279             try:
280                 lat = trex_stats['latency'][lat_pg_id]['latency']
281                 # dropped_pkts += lat['err_cntrs']['dropped']
282                 latencies[port].max_usec = get_latency(lat['total_max'])
283                 if math.isnan(lat['total_min']):
284                     latencies[port].min_usec = 0
285                     latencies[port].avg_usec = 0
286                 else:
287                     latencies[port].min_usec = get_latency(lat['total_min'])
288                     latencies[port].avg_usec = get_latency(lat['average'])
289                 # pick up the HDR histogram if present (otherwise will raise KeyError)
290                 latencies[port].hdrh = lat['hdrh']
291             except KeyError:
292                 pass
293
294     def __combine_latencies(self, in_stats, results, port_handle):
295         """Traverse the TRex result dictionary and combine the chosen latency stats.
296
297           Example of latency dict returned by TRex (2 chains):
298          'latency': {256: {'err_cntrs': {'dropped': 0,
299                                  'dup': 0,
300                                  'out_of_order': 0,
301                                  'seq_too_high': 0,
302                                  'seq_too_low': 0},
303                             'latency': {'average': 26.5,
304                                         'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
305                                         'histogram': {20: 303,
306                                                         30: 320,
307                                                         40: 300,
308                                                         50: 73,
309                                                         60: 4,
310                                                         70: 1},
311                                         'jitter': 14,
312                                         'last_max': 63,
313                                         'total_max': 63,
314                                         'total_min': 20}},
315                     257: {'err_cntrs': {'dropped': 0,
316                                         'dup': 0,
317                                         'out_of_order': 0,
318                                         'seq_too_high': 0,
319                                         'seq_too_low': 0},
320                             'latency': {'average': 29.75,
321                                         'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
322                                         'histogram': {20: 261,
323                                                         30: 431,
324                                                         40: 3,
325                                                         50: 80,
326                                                         60: 225},
327                                         'jitter': 23,
328                                         'last_max': 67,
329                                         'total_max': 67,
330                                         'total_min': 20}},
331                     384: {'err_cntrs': {'dropped': 0,
332                                         'dup': 0,
333                                         'out_of_order': 0,
334                                         'seq_too_high': 0,
335                                         'seq_too_low': 0},
336                             'latency': {'average': 18.0,
337                                         'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
338                                         'histogram': {20: 987, 30: 14},
339                                         'jitter': 0,
340                                         'last_max': 34,
341                                         'total_max': 34,
342                                         'total_min': 20}},
343                     385: {'err_cntrs': {'dropped': 0,
344                                     'dup': 0,
345                                     'out_of_order': 0,
346                                     'seq_too_high': 0,
347                                     'seq_too_low': 0},
348                             'latency': {'average': 19.0,
349                                         'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
350                                         'histogram': {20: 989, 30: 11},
351                                         'jitter': 0,
352                                         'last_max': 38,
353                                         'total_max': 38,
354                                         'total_min': 20}},
355                     'global': {'bad_hdr': 0, 'old_flow': 0}},
356         """
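        # The pg_ids in the example above are the lat_pg_id values returned by get_pg_id():
        # 256/257 (0x100 | chain 0/1 on port 0) and 384/385 (0x180 | chain 0/1 on port 1).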
357         total_max = 0
358         average = 0
359         total_min = float("inf")
360         for chain_id in range(self.chain_count):
361             try:
362                 _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
363                 lat = in_stats['latency'][lat_pg_id]['latency']
364                 # dropped_pkts += lat['err_cntrs']['dropped']
365                 total_max = max(lat['total_max'], total_max)
366                 total_min = min(lat['total_min'], total_min)
367                 average += lat['average']
368             except KeyError:
369                 pass
370         if total_min == float("inf"):
371             total_min = 0
372         results['min_delay_usec'] = total_min
373         results['max_delay_usec'] = total_max
374         results['avg_delay_usec'] = int(average / self.chain_count)
375
376     def _bind_vxlan(self):
377         bind_layers(UDP, VXLAN, dport=4789)
378         bind_layers(VXLAN, Ether)
379
380     def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
381         """Create a packet of given size.
382
383         l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
384         """
385         # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
386         frame_size = int(l2frame_size) - 4
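        # e.g. a 64-byte L2 frame is built as a 60-byte packet, TRex/the NIC appends the 4-byte FCS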
387         vm_param = []
388         if stream_cfg['vxlan'] is True:
389             self._bind_vxlan()
390             encap_level = '1'
391             pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
392             if stream_cfg['vtep_vlan'] is not None:
393                 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
394             pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
395             pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
396             pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
397             pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
398             # need to randomize the outer header UDP src port based on flow
399             vxlan_udp_src_fv = STLVmFlowVar(
400                 name="vxlan_udp_src",
401                 min_value=1337,
402                 max_value=32767,
403                 size=2,
404                 op="random")
405             vm_param = [vxlan_udp_src_fv,
406                         STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
407         elif stream_cfg['mpls'] is True:
408             encap_level = '0'
409             pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
410             if stream_cfg['vtep_vlan'] is not None:
411                 pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
412             if stream_cfg['mpls_outer_label'] is not None:
413                 pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
414             if stream_cfg['mpls_inner_label'] is not None:
415                 pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
416             #  Flow stats and MPLS labels randomization TBD
417             pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
418         else:
419             encap_level = '0'
420             pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
421
422         if stream_cfg['vlan_tag'] is not None:
423             pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
424
425         udp_args = {}
426         if stream_cfg['udp_src_port']:
427             udp_args['sport'] = int(stream_cfg['udp_src_port'])
428             if stream_cfg['udp_port_step'] == 'random':
429                 step = 1
430             else:
431                 step = stream_cfg['udp_port_step']
432             udp_args['sport_step'] = int(step)
433             udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
434         if stream_cfg['udp_dst_port']:
435             udp_args['dport'] = int(stream_cfg['udp_dst_port'])
436             if stream_cfg['udp_port_step'] == 'random':
437                 step = 1
438             else:
439                 step = stream_cfg['udp_port_step']
440             udp_args['dport_step'] = int(step)
441             udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
442
443         pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
444                     UDP(dport=udp_args['dport'], sport=udp_args['sport'])
445
446         # STLVmTupleGen needs a flow count >= the number of cores used by TRex; if the flow count is lower, STLVmFlowVar is used instead
447         if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
448                 stream_cfg['count'] >= self.generator_config.cores:
449             src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
450                                    ip_max=stream_cfg['ip_src_addr_max'],
451                                    port_min=udp_args['sport'],
452                                    port_max=udp_args['sport_max'],
453                                    name="tuple_src",
454                                    limit_flows=stream_cfg['count'])
455             dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
456                                    ip_max=stream_cfg['ip_dst_addr_max'],
457                                    port_min=udp_args['dport'],
458                                    port_max=udp_args['dport_max'],
459                                    name="tuple_dst",
460                                    limit_flows=stream_cfg['count'])
461             vm_param = [
462                 src_fv,
463                 STLVmWrFlowVar(fv_name="tuple_src.ip",
464                                pkt_offset="IP:{}.src".format(encap_level)),
465                 STLVmWrFlowVar(fv_name="tuple_src.port",
466                                pkt_offset="UDP:{}.sport".format(encap_level)),
467                 dst_fv,
468                 STLVmWrFlowVar(fv_name="tuple_dst.ip",
469                                pkt_offset="IP:{}.dst".format(encap_level)),
470                 STLVmWrFlowVar(fv_name="tuple_dst.port",
471                                pkt_offset="UDP:{}.dport".format(encap_level)),
472             ]
473         else:
474             if disable_random_latency_flow:
475                 src_fv_ip = STLVmFlowVar(
476                     name="ip_src",
477                     min_value=stream_cfg['ip_src_addr'],
478                     max_value=stream_cfg['ip_src_addr'],
479                     size=4)
480                 dst_fv_ip = STLVmFlowVar(
481                     name="ip_dst",
482                     min_value=stream_cfg['ip_dst_addr'],
483                     max_value=stream_cfg['ip_dst_addr'],
484                     size=4)
485             elif stream_cfg['ip_addrs_step'] == 'random':
486                 src_fv_ip = STLVmFlowVarRepeatableRandom(
487                     name="ip_src",
488                     min_value=stream_cfg['ip_src_addr'],
489                     max_value=stream_cfg['ip_src_addr_max'],
490                     size=4,
491                     seed=random.randint(0, 32767),
492                     limit=stream_cfg['ip_src_count'])
493                 dst_fv_ip = STLVmFlowVarRepeatableRandom(
494                     name="ip_dst",
495                     min_value=stream_cfg['ip_dst_addr'],
496                     max_value=stream_cfg['ip_dst_addr_max'],
497                     size=4,
498                     seed=random.randint(0, 32767),
499                     limit=stream_cfg['ip_dst_count'])
500             else:
501                 src_fv_ip = STLVmFlowVar(
502                     name="ip_src",
503                     min_value=stream_cfg['ip_src_addr'],
504                     max_value=stream_cfg['ip_src_addr_max'],
505                     size=4,
506                     op="inc",
507                     step=stream_cfg['ip_addrs_step'])
508                 dst_fv_ip = STLVmFlowVar(
509                     name="ip_dst",
510                     min_value=stream_cfg['ip_dst_addr'],
511                     max_value=stream_cfg['ip_dst_addr_max'],
512                     size=4,
513                     op="inc",
514                     step=stream_cfg['ip_addrs_step'])
515
516             if disable_random_latency_flow:
517                 src_fv_port = STLVmFlowVar(
518                     name="p_src",
519                     min_value=udp_args['sport'],
520                     max_value=udp_args['sport'],
521                     size=2)
522                 dst_fv_port = STLVmFlowVar(
523                     name="p_dst",
524                     min_value=udp_args['dport'],
525                     max_value=udp_args['dport'],
526                     size=2)
527             elif stream_cfg['udp_port_step'] == 'random':
528                 src_fv_port = STLVmFlowVarRepeatableRandom(
529                     name="p_src",
530                     min_value=udp_args['sport'],
531                     max_value=udp_args['sport_max'],
532                     size=2,
533                     seed=random.randint(0, 32767),
534                     limit=stream_cfg['udp_src_count'])
535                 dst_fv_port = STLVmFlowVarRepeatableRandom(
536                     name="p_dst",
537                     min_value=udp_args['dport'],
538                     max_value=udp_args['dport_max'],
539                     size=2,
540                     seed=random.randint(0, 32767),
541                     limit=stream_cfg['udp_dst_count'])
542             else:
543                 src_fv_port = STLVmFlowVar(
544                     name="p_src",
545                     min_value=udp_args['sport'],
546                     max_value=udp_args['sport_max'],
547                     size=2,
548                     op="inc",
549                     step=udp_args['sport_step'])
550                 dst_fv_port = STLVmFlowVar(
551                     name="p_dst",
552                     min_value=udp_args['dport'],
553                     max_value=udp_args['dport_max'],
554                     size=2,
555                     op="inc",
556                     step=udp_args['dport_step'])
557             vm_param = [
558                 src_fv_ip,
559                 STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
560                 src_fv_port,
561                 STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
562                 dst_fv_ip,
563                 STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
564                 dst_fv_port,
565                 STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
566             ]
567         # Use HW offload to compute the outer IP and UDP checksums
568         vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
569                                            l4_offset="UDP:0",
570                                            l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
571         # Use software to fix the inner IPv4 checksum for VxLAN packets
572         if int(encap_level):
573             vm_param.append(STLVmFixIpv4(offset="IP:1"))
574         pad = max(0, frame_size - len(pkt_base)) * 'x'
575
576         return STLPktBuilder(pkt=pkt_base / pad,
577                              vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
578
579     def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
580                          e2e=False):
581         """Create a list of streams corresponding to a given chain and stream config.
582
583         port: port where the streams originate (0 or 1)
584         chain_id: the chain to which the streams are associated
585         stream_cfg: stream configuration
586         l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
587         latency: if True also create a latency stream
588         e2e: True if performing "end to end" connectivity check
589         """
590         streams = []
591         pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
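        # pg_id identifies the data streams of this chain/port in flow stats,
        # lat_pg_id identifies the corresponding latency stream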
592         if self.config.no_flow_stats:
593             LOG.info("Traffic flow statistics are disabled.")
594         if l2frame == 'IMIX':
595             for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
596                 pkt = self._create_pkt(stream_cfg, l2_frame_size)
597                 if e2e or stream_cfg['mpls']:
598                     streams.append(STLStream(packet=pkt,
599                                              mode=STLTXCont(pps=ratio)))
600                 else:
601                     if stream_cfg['vxlan'] is True:
602                         streams.append(STLStream(packet=pkt,
603                                                  flow_stats=STLFlowStats(pg_id=pg_id,
604                                                                          vxlan=True)
605                                                  if not self.config.no_flow_stats else None,
606                                                  mode=STLTXCont(pps=ratio)))
607                     else:
608                         streams.append(STLStream(packet=pkt,
609                                                  flow_stats=STLFlowStats(pg_id=pg_id)
610                                                  if not self.config.no_flow_stats else None,
611                                                  mode=STLTXCont(pps=ratio)))
612
613             if latency:
614                 # for IMIX, the latency packets have the average IMIX packet size
615                 if stream_cfg['ip_addrs_step'] == 'random' or \
616                         stream_cfg['udp_port_step'] == 'random':
617                     # Restrict the latency stream to a single flow to avoid creating
618                     # more flows than the requested flow count
619                     pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
620                 else:
621                     pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
622
623         else:
624             l2frame_size = int(l2frame)
625             pkt = self._create_pkt(stream_cfg, l2frame_size)
626             if e2e or stream_cfg['mpls']:
627                 streams.append(STLStream(packet=pkt,
628                                          # Flow stats is disabled for MPLS now
629                                          # flow_stats=STLFlowStats(pg_id=pg_id),
630                                          mode=STLTXCont()))
631             else:
632                 if stream_cfg['vxlan'] is True:
633                     streams.append(STLStream(packet=pkt,
634                                              flow_stats=STLFlowStats(pg_id=pg_id,
635                                                                      vxlan=True)
636                                              if not self.config.no_flow_stats else None,
637                                              mode=STLTXCont()))
638                 else:
639                     streams.append(STLStream(packet=pkt,
640                                              flow_stats=STLFlowStats(pg_id=pg_id)
641                                              if not self.config.no_flow_stats else None,
642                                              mode=STLTXCont()))
643             # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
644             # without vlan, the min l2 frame size is 64
645             # with vlan it is 68
646             # This only applies to the latency stream
647             if latency:
648                 if stream_cfg['vlan_tag'] and l2frame_size < 68:
649                     l2frame_size = 68
650                 if stream_cfg['ip_addrs_step'] == 'random' or \
651                         stream_cfg['udp_port_step'] == 'random':
652                     # Restrict the latency stream to a single flow to avoid creating
653                     # more flows than the requested flow count
654                     pkt = self._create_pkt(stream_cfg, l2frame_size, True)
655                 else:
656                     pkt = self._create_pkt(stream_cfg, l2frame_size)
657
658         if latency:
659             if self.config.no_latency_stats:
660                 LOG.info("Latency flow statistics are disabled.")
661             if stream_cfg['vxlan'] is True:
662                 streams.append(STLStream(packet=pkt,
663                                          flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
664                                                                         vxlan=True)
665                                          if not self.config.no_latency_stats else None,
666                                          mode=STLTXCont(pps=self.LATENCY_PPS)))
667             else:
668                 streams.append(STLStream(packet=pkt,
669                                          flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
670                                          if not self.config.no_latency_stats else None,
671                                          mode=STLTXCont(pps=self.LATENCY_PPS)))
672         return streams
673
674     @timeout(5)
675     def __connect(self, client):
676         client.connect()
677
678     def __local_server_status(self):
679         """ The TRex server may have started but failed while initializing... and stopped.
680         This code is specifically designed to address the case where
681         a fatal failure occurs on a DPDK init call.
682         The TRex startup algorithm should be revised to include some missing timeouts (?)
683         Status returned:
684           0: no error detected
685           1: fatal error detected - should lead to exiting the run
686           2: error detected that could be solved by starting again
687         The diagnostic is based on parsing the local TRex log file (improvable).
688         """
689         status = 0
690         message = None
691         failure = None
692         exited = None
693         cause = None
694         error = None
695         before = None
696         after = None
697         last = None
698         try:
699             with open('/tmp/trex.log', 'r') as trex_log:
700                 for _line in trex_log:
701                     line = _line.strip()
702                     if line.startswith('Usage:'):
703                         break
704                     if 'ports are bound' in line:
705                         continue
706                     if 'please wait' in line:
707                         continue
708                     if 'exit' in line.lower():
709                         exited = line
710                     elif 'cause' in line.lower():
711                         cause = line
712                     elif 'fail' in line.lower():
713                         failure = line
714                     elif 'msg' in line.lower():
715                         message = line
716                     elif (error is not None) and line:
717                         after = line
718                     elif line.startswith('Error:') or line.startswith('ERROR'):
719                         error = line
720                         before = last
721                     last = line
722         except FileNotFoundError:
723             pass
724         if exited is not None:
725             status = 1
726             LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
727             if cause is not None:
728                 LOG.info("TRex [cont'd] %s", cause)
729             if failure is not None:
730                 LOG.info("TRex [cont'd] %s", failure)
731             if message is not None:
732                 LOG.info("TRex [cont'd] %s", message)
733                 if 'not supported yet' in message.lower():
734                     LOG.info("TRex [cont'd] Try starting again!")
735                     status = 2
736         elif error is not None:
737             status = 1
738             LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
739             if after is not None:
740                 LOG.info("TRex [cont'd] %s", after)
741             elif before is not None:
742                 LOG.info("TRex [cont'd] %s", before)
743         return status
744
745     def __connect_after_start(self):
746         # After start, TRex may take a bit of time to initialize,
747         # so we need to retry the connection a few times.
748         # We also try to detect recoverable error cases (by checking the local server status).
749         status = 0
750         for it in range(self.config.generic_retry_count):
751             try:
752                 time.sleep(1)
753                 self.client.connect()
754                 break
755             except Exception as ex:
756                 if it == (self.config.generic_retry_count - 1):
757                     raise
758                 status = self.__local_server_status()
759                 if status > 0:
760                     # No need to wait anymore, something went wrong and TRex exited
761                     if status == 1:
762                         LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
763                         print("More information? Try the command: "
764                             + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
765                         sys.exit(0)
766                     if status == 2:
767                         # a new start will follow
768                         return status
769                 LOG.info("Retrying connection to TRex (%s)...", ex.msg)
770         return status
771
772     def connect(self):
773         """Connect to the TRex server."""
774         status = 0
775         server_ip = self.generator_config.ip
776         LOG.info("Connecting to TRex (%s)...", server_ip)
777
778         # Connect to TRex server
779         self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
780                                 async_port=self.generator_config.zmq_pub_port)
781         try:
782             self.__connect(self.client)
783             if server_ip == '127.0.0.1':
784                 config_updated = self.__check_config()
785                 if config_updated or self.config.restart:
786                     status = self.__restart()
787         except (TimeoutError, STLError) as e:
788             if server_ip == '127.0.0.1':
789                 status = self.__start_local_server()
790             else:
791                 raise TrafficGeneratorException(e.message) from e
792
793         if status == 2:
794             # Workaround in case of a failed TRex server initialization
795             # we try to start it again (twice maximum)
796             # which may allow low level initialization to complete.
797             if self.__start_local_server() == 2:
798                 self.__start_local_server()
799
800         ports = list(self.generator_config.ports)
801         self.port_handle = ports
802         # Prepare the ports
803         self.client.reset(ports)
804         # Read HW information from each port
805         # this returns an array of dict (1 per port)
806         """
807         Example of output for Intel XL710
808         [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
809           'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
810           u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
811           u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
812           u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
813           'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
814           u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
815           'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
816           'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
817           'layer_mode': 'Ethernet', u'numa': 0}, ...]
818         """
819         self.port_info = self.client.get_port_info(ports)
820         LOG.info('Connected to TRex')
821         for id, port in enumerate(self.port_info):
822             LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
823                      id, port['description'], port['speed'], port['src_mac'],
824                      port['pci_addr'], port['driver'])
825         # Make sure the 2 ports have the same speed
826         if self.port_info[0]['speed'] != self.port_info[1]['speed']:
827             raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
828                                             (self.port_info[0]['speed'],
829                                              self.port_info[1]['speed']))
830
831     def __start_local_server(self):
832         try:
833             LOG.info("Starting TRex ...")
834             self.__start_server()
835             status = self.__connect_after_start()
836         except (TimeoutError, STLError) as e:
837             LOG.error('Cannot connect to TRex')
838             LOG.error(traceback.format_exc())
839             logpath = '/tmp/trex.log'
840             if os.path.isfile(logpath):
841                 # Wait for TRex to finish writing error message
842                 last_size = 0
843                 for _ in range(self.config.generic_retry_count):
844                     size = os.path.getsize(logpath)
845                     if size == last_size:
846                         # probably not writing anymore
847                         break
848                     last_size = size
849                     time.sleep(1)
850                 with open(logpath, 'r') as f:
851                     message = f.read()
852             else:
853                 message = e.message
854             raise TrafficGeneratorException(message) from e
855         return status
856
857     def __start_server(self):
858         server = TRexTrafficServer()
859         server.run_server(self.generator_config)
860
861     def __check_config(self):
862         server = TRexTrafficServer()
863         return server.check_config_updated(self.generator_config)
864
865     def __restart(self):
866         LOG.info("Restarting TRex ...")
867         self.__stop_server()
868         # Wait for server stopped
869         for _ in range(self.config.generic_retry_count):
870             time.sleep(1)
871             if not self.client.is_connected():
872                 LOG.info("TRex is stopped...")
873                 break
874         # Start and report a possible failure
875         return self.__start_local_server()
876
877     def __stop_server(self):
878         if self.generator_config.ip == '127.0.0.1':
879             ports = self.client.get_acquired_ports()
880             LOG.info('Release ports %s and stopping TRex...', ports)
881             try:
882                 if ports:
883                     self.client.release(ports=ports)
884                 self.client.server_shutdown()
885             except STLError as e:
886                 LOG.warning('Unable to stop TRex. Error: %s', e)
887         else:
888             LOG.info('Using remote TRex. Unable to stop TRex')
889
890     def resolve_arp(self):
891         """Resolve all configured remote IP addresses.
892
893         return: None if ARP failed to resolve for all IP addresses
894                 else a dict of list of dest macs indexed by port#
895                 the dest macs in the list are indexed by the chain id
896         """
897         self.client.set_service_mode(ports=self.port_handle)
898         LOG.info('Polling ARP until successful...')
899         arp_dest_macs = {}
900         for port, device in zip(self.port_handle, self.generator_config.devices):
901             # there should be 1 stream config per chain
902             stream_configs = device.get_stream_configs()
903             chain_count = len(stream_configs)
904             ctx = self.client.create_service_ctx(port=port)
905             # all dest macs on this port indexed by chain ID
906             dst_macs = [None] * chain_count
907             dst_macs_count = 0
908             # the index in the list is the chain id
909             if self.config.vxlan or self.config.mpls:
910                 arps = [
911                     ServiceARP(ctx,
912                                src_ip=device.vtep_src_ip,
913                                dst_ip=device.vtep_dst_ip,
914                                vlan=device.vtep_vlan)
915                     for cfg in stream_configs
916                 ]
917             else:
918                 arps = [
919                     ServiceARP(ctx,
920                                src_ip=cfg['ip_src_tg_gw'],
921                                dst_ip=cfg['mac_discovery_gw'],
922                                # will be None if no vlan tagging
923                                vlan=cfg['vlan_tag'])
924                     for cfg in stream_configs
925                 ]
926
927             for attempt in range(self.config.generic_retry_count):
928                 try:
929                     ctx.run(arps)
930                 except STLError:
931                     LOG.error(traceback.format_exc())
932                     continue
933
934                 unresolved = []
935                 for chain_id, mac in enumerate(dst_macs):
936                     if not mac:
937                         arp_record = arps[chain_id].get_record()
938                         if arp_record.dst_mac:
939                             dst_macs[chain_id] = arp_record.dst_mac
940                             dst_macs_count += 1
941                             LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
942                                      port, chain_id,
943                                      arp_record.src_ip,
944                                      arp_record.dst_ip, arp_record.dst_mac)
945                         else:
946                             unresolved.append(arp_record.dst_ip)
947                 if dst_macs_count == chain_count:
948                     arp_dest_macs[port] = dst_macs
949                     LOG.info('ARP resolved successfully for port %s', port)
950                     break
951
952                 retry = attempt + 1
953                 LOG.info('Retrying ARP for: %s (retry %d/%d)',
954                          unresolved, retry, self.config.generic_retry_count)
955                 if retry < self.config.generic_retry_count:
956                     time.sleep(self.config.generic_poll_sec)
957             else:
958                 LOG.error('ARP timed out for port %s (resolved %d out of %d)',
959                           port,
960                           dst_macs_count,
961                           chain_count)
962                 break
963
964         # A traffic capture may have been started (from a T-Rex console) at this time.
965         # If so requested, we keep the service mode enabled here; otherwise we disable it.
966         #  | Disabling the service mode while a capture is in progress
967         #  | would cause the application to stop/crash with an error.
968         if not self.config.service_mode:
969             self.client.set_service_mode(ports=self.port_handle, enabled=False)
970         if len(arp_dest_macs) == len(self.port_handle):
971             return arp_dest_macs
972         return None
973
974     def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
975         """Check if the rate provided by the user meets the minimum requirement. Applies only if latency is True."""
976         intf_speed = self.generator_config.intf_speed
977         if latency:
978             if bidirectional:
979                 mult = 2
980                 total_rate = 0
981                 for rate in rates:
982                     r = utils.convert_rates(l2frame_size, rate, intf_speed)
983                     total_rate += int(r['rate_pps'])
984             else:
985                 mult = 1
986                 r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
987                 total_rate = int(r['rate_pps'])
988             # rate must be enough for latency stream and at least 1 pps for base stream per chain
989             required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
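            # e.g. with 2 chains and bidirectional traffic: (1000 + 1) * 2 * 2 = 4004 pps minimum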
990             result = utils.convert_rates(l2frame_size,
991                                          {'rate_pps': required_rate},
992                                          intf_speed * mult)
993             result['result'] = total_rate >= required_rate
994             return result
995
996         return {'result': True}
997
998     def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
999         """Program all the streams in Trex server.
1000
1001         l2frame_size: L2 frame size or IMIX
1002         rates: a list of 2 rates to run each direction
1003                each rate is a dict like {'rate_pps': '10kpps'}
1004         bidirectional: True if bidirectional
1005         latency: True if latency measurement is needed
1006         e2e: True if performing "end to end" connectivity check
1007         """
1008         r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
1009         if not r['result']:
1010             raise TrafficGeneratorException(
1011                 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
1012                 .format(pps=r['rate_pps'],
1013                         bps=r['rate_bps'],
1014                         load=r['rate_percent']))
1015         self.l2_frame_size = l2frame_size
1016         # a dict of list of streams indexed by port#
1017         # in case of fixed size, has self.chain_count * 2 * 2 streams
1018         # (1 normal + 1 latency stream per direction per chain)
1019         # for IMIX, has self.chain_count * 2 * 4 streams
1020         # (3 normal + 1 latency stream per direction per chain)
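        # e.g. with 2 chains and a fixed frame size: 4 streams per port, 8 in total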
1021         streamblock = {}
1022         for port in self.port_handle:
1023             streamblock[port] = []
1024         stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
1025         if self.generator_config.ip_addrs_step == 'random' \
1026                 or self.generator_config.gen_config.udp_port_step == 'random':
1027             LOG.warning("Using a random step, the number of flows can be less than "
1028                         "the requested number of flows because the repeatable random "
1029                         "generation may reproduce the same pattern of values")
1030         self.rates = [utils.to_rate_str(rate) for rate in rates]
1031         for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
1032             streamblock[0].extend(self.generate_streams(self.port_handle[0],
1033                                                         chain_id,
1034                                                         fwd_stream_cfg,
1035                                                         l2frame_size,
1036                                                         latency=latency,
1037                                                         e2e=e2e))
1038             if len(self.rates) > 1:
1039                 streamblock[1].extend(self.generate_streams(self.port_handle[1],
1040                                                             chain_id,
1041                                                             rev_stream_cfg,
1042                                                             l2frame_size,
1043                                                             latency=bidirectional and latency,
1044                                                             e2e=e2e))
1045
1046         for port in self.port_handle:
1047             if self.config.vxlan:
1048                 self.client.set_port_attr(ports=port, vxlan_fs=[4789])
1049             else:
1050                 self.client.set_port_attr(ports=port, vxlan_fs=None)
1051             self.client.add_streams(streamblock[port], ports=port)
1052             LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
1053
1054     def clear_streamblock(self):
1055         """Clear all streams from TRex."""
1056         self.rates = []
1057         self.client.reset(self.port_handle)
1058         LOG.info('Cleared all existing streams')
1059
1060     def get_stats(self, if_stats=None):
1061         """Get stats from Trex."""
1062         stats = self.client.get_stats()
1063         return self.extract_stats(stats, if_stats)
1064
1065     def get_macs(self):
1066         """Return the Trex local port MAC addresses.
1067
1068         return: a list of MAC addresses indexed by the port#
1069         """
1070         return [port['src_mac'] for port in self.port_info]
1071
1072     def get_port_speed_gbps(self):
1073         """Return the Trex local port speeds.
1074
1075         return: a list of speed in Gbps indexed by the port#
1076         """
1077         return [port['speed'] for port in self.port_info]
1078
1079     def clear_stats(self):
1080         """Clear all stats in the traffic generator."""
1081         if self.port_handle:
1082             self.client.clear_stats()
1083
1084     def start_traffic(self):
1085         """Start generating traffic in all ports."""
1086         for port, rate in zip(self.port_handle, self.rates):
1087             self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
1088
1089     def stop_traffic(self):
1090         """Stop generating traffic."""
1091         self.client.stop(ports=self.port_handle)
1092
1093     def start_capture(self):
1094         """Capture all packets on both ports that are unicast to us."""
1095         if self.capture_id:
1096             self.stop_capture()
1097         # Need to filter out unwanted packets so we do not end up counting
1098         # src MACs of frames that are not unicast to us
1099         src_mac_list = self.get_macs()
1100         bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
1101         # ports must be in service mode in order to enable capture
1102         self.client.set_service_mode(ports=self.port_handle)
1103         self.capture_id = self.client.start_capture \
1104             (rx_ports=self.port_handle, bpf_filter=bpf_filter)
1105
1106     def fetch_capture_packets(self):
1107         """Fetch capture packets in capture mode."""
1108         if self.capture_id:
1109             self.packet_list = []
1110             self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
1111                                               output=self.packet_list)
1112
1113     def stop_capture(self):
1114         """Stop capturing packets."""
1115         if self.capture_id:
1116             self.client.stop_capture(capture_id=self.capture_id['id'])
1117             self.capture_id = None
1118             # A traffic capture may have been started (from a T-Rex console) at this time.
1119             # If so requested, we keep the service mode enabled here; otherwise we disable it.
1120             #  | Disabling the service mode while a capture is in progress
1121             #  | would cause the application to stop/crash with an error.
1122             if not self.config.service_mode:
1123                 self.client.set_service_mode(ports=self.port_handle, enabled=False)
1124
1125     def cleanup(self):
1126         """Cleanup Trex driver."""
1127         if self.client:
1128             try:
1129                 self.client.reset(self.port_handle)
1130                 self.client.disconnect()
1131             except STLError:
1132                 # TRex does not like a reset while in disconnected state
1133                 pass
1134
1135     def set_service_mode(self, enabled=True):
1136         """Enable/disable the 'service' mode."""
1137         self.client.set_service_mode(ports=self.port_handle, enabled=enabled)