NFVBENCH-169: UDP port random step correction
nfvbench/traffic_gen/trex_gen.py
# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Driver module for TRex traffic generator."""

import math
import os
import random
import time
import traceback

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFixIpv4
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS

class VXLAN(Packet):
    """VxLAN class."""

    _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
    name = "VXLAN"
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]

    def mysummary(self):
        """Summary."""
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")

class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    LATENCY_PPS = 1000
    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        """Trex driver."""
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.client = None
        self.id = count()
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        self.rates = []
        self.capture_id = None
        self.packet_list = []

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
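
        Example: port=1, chain_id=5 gives pg_id=0x85 and lat_pg_id=0x185.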
        """
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK

    def extract_stats(self, in_stats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        """
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]
        return result

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flows stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
               1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
                128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'rx_bytes': {0: nan, 1: nan, 'total': nan},
                'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                'rx_pps': {0: 0, 1: 0, 'total': 0},
                'tx_bps': {0: 0, 1: 0, 'total': 0},
                'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                'tx_pps': {0: 0, 1: 0, 'total': 0}}, etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
        def get_latency(lval):
            try:
                return int(round(lval))
            except ValueError:
                return 0

        for ifs in if_stats:
            ifs.tx = ifs.rx = 0
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
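                # for both the normal and the latency stream of this chain,
                # tx is counted on this port and rx on the far end port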
                try:
                    pg_stats = trex_stats['flow_stats'][pid]
                    if_stats[port].tx += pg_stats['tx_pkts'][port]
                    if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
                except KeyError:
                    pass
            try:
                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                else:
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
            except KeyError:
                pass

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine the chosen latency stats.

          example of latency dict returned by trex (2 chains):
         'latency': {256: {'err_cntrs': {'dropped': 0,
                                         'dup': 0,
                                         'out_of_order': 0,
                                         'seq_too_high': 0,
                                         'seq_too_low': 0},
                           'latency': {'average': 26.5,
                                       'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                       'histogram': {20: 303,
                                                     30: 320,
                                                     40: 300,
                                                     50: 73,
                                                     60: 4,
                                                     70: 1},
                                       'jitter': 14,
                                       'last_max': 63,
                                       'total_max': 63,
                                       'total_min': 20}},
                     257: {'err_cntrs': {'dropped': 0,
                                         'dup': 0,
                                         'out_of_order': 0,
                                         'seq_too_high': 0,
                                         'seq_too_low': 0},
                           'latency': {'average': 29.75,
                                       'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                       'histogram': {20: 261,
                                                     30: 431,
                                                     40: 3,
                                                     50: 80,
                                                     60: 225},
                                       'jitter': 23,
                                       'last_max': 67,
                                       'total_max': 67,
                                       'total_min': 20}},
                     384: {'err_cntrs': {'dropped': 0,
                                         'dup': 0,
                                         'out_of_order': 0,
                                         'seq_too_high': 0,
                                         'seq_too_low': 0},
                           'latency': {'average': 18.0,
                                       'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                       'histogram': {20: 987, 30: 14},
                                       'jitter': 0,
                                       'last_max': 34,
                                       'total_max': 34,
                                       'total_min': 20}},
                     385: {'err_cntrs': {'dropped': 0,
                                         'dup': 0,
                                         'out_of_order': 0,
                                         'seq_too_high': 0,
                                         'seq_too_low': 0},
                           'latency': {'average': 19.0,
                                       'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                       'histogram': {20: 989, 30: 11},
                                       'jitter': 0,
                                       'last_max': 38,
                                       'total_max': 38,
                                       'total_min': 20}},
                     'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
        total_max = 0
        average = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            try:
                _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
                lat = in_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                total_max = max(lat['total_max'], total_max)
                total_min = min(lat['total_min'], total_min)
                average += lat['average']
            except KeyError:
                pass
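        # no latency stream was seen on any chain: report 0 instead of inf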
        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        vm_param = []
        if stream_cfg['vxlan'] is True:
            self._bind_vxlan()
            encap_level = '1'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
                min_value=1337,
                max_value=32767,
                size=2,
                op="random")
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            #  Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        udp_args = {}
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
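            # when the step is 'random' the ports are drawn by a repeatable random
            # flow var further below; step only serves as a valid integer placeholder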
            if stream_cfg['udp_port_step'] == 'random':
                step = 1
            else:
                step = stream_cfg['udp_port_step']
            udp_args['sport_step'] = int(step)
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            if stream_cfg['udp_port_step'] == 'random':
                step = 1
            else:
                step = stream_cfg['udp_port_step']
            udp_args['dport_step'] = int(step)
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])
        if stream_cfg['ip_src_static'] is True:
            src_max_ip_value = stream_cfg['ip_src_addr']
        else:
            src_max_ip_value = stream_cfg['ip_src_addr_max']
        if stream_cfg['ip_addrs_step'] == 'random':
            src_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_src_count'])
            dst_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_dst_count'])
        else:
            src_fv_ip = STLVmFlowVar(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])
            dst_fv_ip = STLVmFlowVar(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])

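        # UDP port flow variables: a repeatable random sequence when the step is
        # 'random', otherwise a simple increment by the configured step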
        if stream_cfg['udp_port_step'] == 'random':
            src_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_src_count'])
            dst_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_dst_count'])
        else:
            src_fv_port = STLVmFlowVar(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                op="inc",
                step=udp_args['sport_step'])
            dst_fv_port = STLVmFlowVar(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                op="inc",
                step=udp_args['dport_step'])
        vm_param.extend([
            src_fv_ip,
            STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
            src_fv_port,
            STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
            dst_fv_ip,
            STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
            dst_fv_port,
            STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
        ])
        # Use HW offload to fix the outer IP/UDP checksums
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_offset="UDP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IP/UDP payload for VxLAN packets
        if int(encap_level):
            vm_param.append(STLVmFixIpv4(offset="IP:1"))
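        # pad the payload with 'x' bytes up to the requested frame size (FCS excluded)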
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated to
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                                         vxlan=True)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            if latency:
                # for IMIX, the latency packets have the average IMIX packet size
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)

        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                                         mode=STLTXCont()))
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                                                     vxlan=True)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # with vlan it is 68
            # This only applies to the latency stream
            if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
                pkt = self._create_pkt(stream_cfg, 68)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                                                        vxlan=True)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    @timeout(5)
    def __connect(self, client):
        client.connect()

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        for it in range(self.config.generic_retry_count):
            try:
                time.sleep(1)
                self.client.connect()
                break
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                    raise
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)

    def connect(self):
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                self.__start_local_server()
            else:
                raise TrafficGeneratorException(e.message)

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        # Prepare the ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                last_size = 0
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        break
                    last_size = size
                    time.sleep(1)
                with open(logpath, 'r') as f:
                    message = f.read()
            else:
                message = e.message
            raise TrafficGeneratorException(message)

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for the server to stop
        for _ in range(self.config.generic_retry_count):
            time.sleep(1)
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                if ports:
                    self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of list of dest macs indexed by port#
                the dest macs in the list are indexed by the chain id
        """
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        arp_dest_macs = {}
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            dst_macs_count = 0
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                arps = [
                    ServiceARP(ctx,
                               src_ip=device.vtep_src_ip,
                               dst_ip=device.vtep_dst_ip,
                               vlan=device.vtep_vlan)
                    for cfg in stream_configs
                ]
            else:
                arps = [
                    ServiceARP(ctx,
                               src_ip=cfg['ip_src_tg_gw'],
                               dst_ip=cfg['mac_discovery_gw'],
                               # will be None if no vlan tagging
                               vlan=cfg['vlan_tag'])
                    for cfg in stream_configs
                ]

            for attempt in range(self.config.generic_retry_count):
                try:
                    ctx.run(arps)
                except STLError:
                    LOG.error(traceback.format_exc())
                    continue

                unresolved = []
                for chain_id, mac in enumerate(dst_macs):
                    if not mac:
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            dst_macs_count += 1
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     port, chain_id,
                                     arp_record.src_ip,
                                     arp_record.dst_ip, arp_record.dst_mac)
                        else:
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    break

                retry = attempt + 1
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
            else:
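                # for/else: reached only when ARP retries are exhausted without a break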
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          port,
                          dst_macs_count,
                          chain_count)
                break

        # if the capture from the TRex console was started before the arp request step,
        # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
        return None

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if the rate provided by the user is above requirements. Applies only if latency is True."""
        intf_speed = self.generator_config.intf_speed
        if latency:
            if bidirectional:
                mult = 2
                total_rate = 0
                for rate in rates:
                    r = utils.convert_rates(l2frame_size, rate, intf_speed)
                    total_rate += int(r['rate_pps'])
            else:
                mult = 1
                r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
                total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
                                         intf_speed * mult)
            result['result'] = total_rate >= required_rate
            return result

        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                        chain_id,
                                                        fwd_stream_cfg,
                                                        l2frame_size,
                                                        latency=latency,
                                                        e2e=e2e))
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            chain_id,
                                                            rev_stream_cfg,
                                                            l2frame_size,
                                                            latency=bidirectional and latency,
                                                            e2e=e2e))

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.rates = []
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speed in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        if self.capture_id:
            self.stop_capture()
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture(rx_ports=self.port_handle,
                                                    bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        if self.capture_id:
            self.packet_list = []
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        if self.capture_id:
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # if the capture from TRex console was started before the connectivity step,
            # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service_mode'."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)