[NFVBENCH-137] Fix L4 checksums for VxLAN (again)
nfvbench.git: nfvbench/traffic_gen/trex_gen.py
# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Driver module for TRex traffic generator."""

import math
import os
import random
import time
import traceback

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS

class VXLAN(Packet):
    """VxLAN class."""

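    # Note (illustrative): the fields below spell out the 8-byte VxLAN header of
    # RFC 7348: a 32-bit flags word whose default 0x08000000 sets the 'I' bit
    # (valid VNI present), a 24-bit VNI, and one trailing reserved byte.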
    _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
    name = "VXLAN"
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]

    def mysummary(self):
        """Summary."""
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")

class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    LATENCY_PPS = 1000
    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        """Trex driver."""
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.client = None
        self.id = count()
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        self.rates = []
        self.capture_id = None
        self.packet_list = []

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
        """
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK

    def extract_stats(self, in_stats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        """
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handle should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]
        return result

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flow stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
               1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
                128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'rx_bytes': {0: nan, 1: nan, 'total': nan},
                'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                'rx_pps': {0: 0, 1: 0, 'total': 0},
                'tx_bps': {0: 0, 1: 0, 'total': 0},
                'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
        def get_latency(lval):
            try:
                return int(round(lval))
            except ValueError:
                return 0

        for ifs in if_stats:
            ifs.tx = ifs.rx = 0
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
                try:
                    pg_stats = trex_stats['flow_stats'][pid]
                    if_stats[port].tx += pg_stats['tx_pkts'][port]
                    if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
                except KeyError:
                    pass
            try:
                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                else:
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
            except KeyError:
                pass

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine chosen latency stats.

        example of latency dict returned by trex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                                 'dup': 0,
                                 'out_of_order': 0,
                                 'seq_too_high': 0,
                                 'seq_too_low': 0},
                            'latency': {'average': 26.5,
                                        'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                        'histogram': {20: 303,
                                                        30: 320,
                                                        40: 300,
                                                        50: 73,
                                                        60: 4,
                                                        70: 1},
                                        'jitter': 14,
                                        'last_max': 63,
                                        'total_max': 63,
                                        'total_min': 20}},
                    257: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                            'latency': {'average': 29.75,
                                        'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                        'histogram': {20: 261,
                                                        30: 431,
                                                        40: 3,
                                                        50: 80,
                                                        60: 225},
                                        'jitter': 23,
                                        'last_max': 67,
                                        'total_max': 67,
                                        'total_min': 20}},
                    384: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                            'latency': {'average': 18.0,
                                        'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                        'histogram': {20: 987, 30: 14},
                                        'jitter': 0,
                                        'last_max': 34,
                                        'total_max': 34,
                                        'total_min': 20}},
                    385: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                            'latency': {'average': 19.0,
                                        'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                        'histogram': {20: 989, 30: 11},
                                        'jitter': 0,
                                        'last_max': 38,
                                        'total_max': 38,
                                        'total_min': 20}},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
        total_max = 0
        average = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            try:
                _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
                lat = in_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                total_max = max(lat['total_max'], total_max)
                total_min = min(lat['total_min'], total_min)
                average += lat['average']
            except KeyError:
                pass
        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
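        # Register the VxLAN dissection bindings with scapy: UDP destination port 4789
        # carries a VXLAN header, and the VXLAN payload is an inner Ethernet frame.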
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        vm_param = []
        if stream_cfg['vxlan'] is True:
            self._bind_vxlan()
            encap_level = '1'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
                min_value=1337,
                max_value=32767,
                size=2,
                op="random")
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            #  Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        udp_args = {}
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            udp_args['sport_step'] = int(stream_cfg['udp_port_step'])
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            udp_args['dport_step'] = int(stream_cfg['udp_port_step'])
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])
        if stream_cfg['ip_src_static'] is True:
            src_max_ip_value = stream_cfg['ip_src_addr']
        else:
            src_max_ip_value = stream_cfg['ip_src_addr_max']
        if stream_cfg['ip_addrs_step'] == 'random':
            src_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_src_count'])
            dst_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_dst_count'])
        else:
            src_fv_ip = STLVmFlowVar(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])
            dst_fv_ip = STLVmFlowVar(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])

        if stream_cfg['udp_port_step'] == 'random':
            src_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_src_count'])
            dst_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_dst_count'])
        else:
            src_fv_port = STLVmFlowVar(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                op="inc",
                step=udp_args['sport_step'])
            dst_fv_port = STLVmFlowVar(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                op="inc",
                step=udp_args['dport_step'])
        # append the flow variable write instructions without discarding the
        # VxLAN outer UDP source port flow var added above (if any)
        vm_param.extend([
            src_fv_ip,
            STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
            src_fv_port,
            STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
            dst_fv_ip,
            STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
            dst_fv_port,
            STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
        ])
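        # L4 checksum fix: with VxLAN (encap_level '1') this loop emits two
        # STLVmFixChecksumHw instructions, first for the inner IP/UDP pair ("IP:1"/"UDP:1"),
        # then for the outer pair ("IP:0"/"UDP:0"), so both checksums are recomputed after
        # the flow variables above have rewritten addresses and ports; without
        # encapsulation only the single "IP:0"/"UDP:0" fix is emitted.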
        for encap in range(int(encap_level), -1, -1):
            vm_param.append(STLVmFixChecksumHw(l3_offset="IP:{}".format(encap),
                                               l4_offset="UDP:{}".format(encap),
                                               l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                                         vxlan=True)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            if latency:
                # for IMIX, the latency packets have the average IMIX packet size
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)

        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                                         mode=STLTXCont()))
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                                                     vxlan=True)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # with vlan it is 68
            # This only applies to the latency stream
            if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
                pkt = self._create_pkt(stream_cfg, 68)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                                                        vxlan=True)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    @timeout(5)
    def __connect(self, client):
        client.connect()

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        for it in range(self.config.generic_retry_count):
            try:
                time.sleep(1)
                self.client.connect()
                break
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                    raise
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)

    def connect(self):
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                self.__start_local_server()
            else:
                raise TrafficGeneratorException(e.message)

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        # Prepare the ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                last_size = 0
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        break
                    last_size = size
                    time.sleep(1)
                with open(logpath, 'r') as f:
                    message = f.read()
            else:
                message = e.message
            raise TrafficGeneratorException(message)

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            time.sleep(1)
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                if ports:
                    self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP resolution failed for one or more of the configured addresses,
                else a dict of lists of dest macs indexed by port#
                the dest macs in each list are indexed by the chain id
        """
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        arp_dest_macs = {}
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            dst_macs_count = 0
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                arps = [
                    ServiceARP(ctx,
                               src_ip=device.vtep_src_ip,
                               dst_ip=device.vtep_dst_ip,
                               vlan=device.vtep_vlan)
                    for cfg in stream_configs
                ]
            else:
                arps = [
                    ServiceARP(ctx,
                               src_ip=cfg['ip_src_tg_gw'],
                               dst_ip=cfg['mac_discovery_gw'],
                               # will be None if no vlan tagging
                               vlan=cfg['vlan_tag'])
                    for cfg in stream_configs
                ]

            for attempt in range(self.config.generic_retry_count):
                try:
                    ctx.run(arps)
                except STLError:
                    LOG.error(traceback.format_exc())
                    continue

                unresolved = []
                for chain_id, mac in enumerate(dst_macs):
                    if not mac:
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            dst_macs_count += 1
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     port, chain_id,
                                     arp_record.src_ip,
                                     arp_record.dst_ip, arp_record.dst_mac)
                        else:
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    break

                retry = attempt + 1
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
            else:
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          port,
                          dst_macs_count,
                          chain_count)
                break

        # if the capture from the TRex console was started before the arp request step,
        # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
        return None

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
        intf_speed = self.generator_config.intf_speed
        if latency:
            if bidirectional:
                mult = 2
                total_rate = 0
                for rate in rates:
                    r = utils.convert_rates(l2frame_size, rate, intf_speed)
                    total_rate += int(r['rate_pps'])
            else:
                mult = 1
                r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
                total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
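            # e.g. with LATENCY_PPS=1000, 2 chains and bidirectional traffic (mult=2):
            # required_rate = (1000 + 1) * 2 * 2 = 4004 pps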
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
                                         intf_speed * mult)
            result['result'] = total_rate >= required_rate
            return result

        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                        chain_id,
                                                        fwd_stream_cfg,
                                                        l2frame_size,
                                                        latency=latency,
                                                        e2e=e2e))
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            chain_id,
                                                            rev_stream_cfg,
                                                            l2frame_size,
                                                            latency=bidirectional and latency,
                                                            e2e=e2e))

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.rates = []
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speeds in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        if self.capture_id:
            self.stop_capture()
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture(rx_ports=self.port_handle,
                                                    bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        if self.capture_id:
            self.packet_list = []
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        if self.capture_id:
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # if the capture from TRex console was started before the connectivity step,
            # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service_mode'."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)