# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Driver module for TRex traffic generator."""

import math
import os
import random
import time
import traceback

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFixIpv4
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS

class VXLAN(Packet):
    """VxLAN class."""

    _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
    name = "VXLAN"
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]

    def mysummary(self):
        """Summary."""
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
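    # Illustrative sketch (not exercised by this driver): once TRex._bind_vxlan()
    # has bound UDP port 4789 to this class, a VXLAN-encapsulated frame can be
    # composed with the usual scapy operators, e.g.:
    #
    #     outer = Ether() / IP() / UDP(dport=4789) / VXLAN(vni=1234)
    #     frame = outer / Ether() / IP() / UDP()
    #
    # The real addresses, VNI and flow variables are filled in by _create_pkt()
    # from the stream configuration.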

class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    LATENCY_PPS = 1000
    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        """Trex driver."""
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.client = None
        self.id = count()
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        self.rates = []
        self.capture_id = None
        self.packet_list = []

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
        """
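        # Worked example (illustration only): for port 1 and chain_id 5,
        # pg_id = 1 * 0x0080 | 5 = 0x85 (133) and
        # lat_pg_id = 0x85 | 0x0100 = 0x185 (389)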
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK

    def extract_stats(self, in_stats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        """
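        # Drop accounting sketch (hypothetical numbers): if the far end port sent
        # opackets=15000 and this port received ipackets=14990, the RX drop count
        # reported below is 15000 - 14990 = 10, i.e. the far end TX counter minus
        # the local RX counter.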
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]
        return result

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flows stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
               1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                   'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'rx_bytes': {0: nan, 1: nan, 'total': nan},
                   'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                   'rx_pps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps': {0: 0, 1: 0, 'total': 0},
                   'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                   'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                   'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                   'tx_pps': {0: 0, 1: 0, 'total': 0}},
                128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'rx_bytes': {0: nan, 1: nan, 'total': nan},
                'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                'rx_pps': {0: 0, 1: 0, 'total': 0},
                'tx_bps': {0: 0, 1: 0, 'total': 0},
                'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                'tx_pps': {0: 0, 1: 0, 'total': 0}},etc...

        the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
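        # Worked example of the transposition (using the sample above, chain_idx=0):
        # get_pg_id() yields pg_id 0 for port 0 and 128 for port 1, so:
        #   if_stats[0].tx += flow_stats[0]['tx_pkts'][0]     (15001)
        #   if_stats[1].rx += flow_stats[0]['rx_pkts'][1]     (15001)
        #   if_stats[1].tx += flow_stats[128]['tx_pkts'][1]   (15001)
        #   if_stats[0].rx += flow_stats[128]['rx_pkts'][0]   (15001)
        # The latency pg_ids (256 and 384 here) are added the same way when present.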
        def get_latency(lval):
            try:
                return int(round(lval))
            except ValueError:
                return 0

        for ifs in if_stats:
            ifs.tx = ifs.rx = 0
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
                try:
                    pg_stats = trex_stats['flow_stats'][pid]
                    if_stats[port].tx += pg_stats['tx_pkts'][port]
                    if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
                except KeyError:
                    pass
            try:
                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                else:
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
            except KeyError:
                pass

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine the chosen latency stats.

        Example of a latency dict returned by TRex (2 chains):
         'latency': {256: {'err_cntrs': {'dropped': 0,
                                 'dup': 0,
                                 'out_of_order': 0,
                                 'seq_too_high': 0,
                                 'seq_too_low': 0},
                            'latency': {'average': 26.5,
                                        'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                        'histogram': {20: 303,
                                                        30: 320,
                                                        40: 300,
                                                        50: 73,
                                                        60: 4,
                                                        70: 1},
                                        'jitter': 14,
                                        'last_max': 63,
                                        'total_max': 63,
                                        'total_min': 20}},
                    257: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                            'latency': {'average': 29.75,
                                        'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                        'histogram': {20: 261,
                                                        30: 431,
                                                        40: 3,
                                                        50: 80,
                                                        60: 225},
                                        'jitter': 23,
                                        'last_max': 67,
                                        'total_max': 67,
                                        'total_min': 20}},
                    384: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                            'latency': {'average': 18.0,
                                        'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                        'histogram': {20: 987, 30: 14},
                                        'jitter': 0,
                                        'last_max': 34,
                                        'total_max': 34,
                                        'total_min': 20}},
                    385: {'err_cntrs': {'dropped': 0,
                                    'dup': 0,
                                    'out_of_order': 0,
                                    'seq_too_high': 0,
                                    'seq_too_low': 0},
                            'latency': {'average': 19.0,
                                        'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                        'histogram': {20: 989, 30: 11},
                                        'jitter': 0,
                                        'last_max': 38,
                                        'total_max': 38,
                                        'total_min': 20}},
                    'global': {'bad_hdr': 0, 'old_flow': 0}},
        """
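        # Combination sketch using the 2-chain example above for port 0 (latency
        # pg_ids 256 and 257): min = min(20, 20) = 20, max = max(63, 67) = 67,
        # avg = int((26.5 + 29.75) / 2) = 28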
        total_max = 0
        average = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            try:
                _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
                lat = in_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                total_max = max(lat['total_max'], total_max)
                total_min = min(lat['total_min'], total_min)
                average += lat['average']
            except KeyError:
                pass
        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        vm_param = []
        if stream_cfg['vxlan'] is True:
            self._bind_vxlan()
            encap_level = '1'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
                min_value=1337,
                max_value=32767,
                size=2,
                op="random")
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            #  Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        udp_args = {}
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            udp_args['sport_step'] = int(stream_cfg['udp_port_step'])
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            udp_args['dport_step'] = int(stream_cfg['udp_port_step'])
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])
        if stream_cfg['ip_src_static'] is True:
            src_max_ip_value = stream_cfg['ip_src_addr']
        else:
            src_max_ip_value = stream_cfg['ip_src_addr_max']
        if stream_cfg['ip_addrs_step'] == 'random':
            src_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_src_count'])
            dst_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_dst_count'])
        else:
            src_fv_ip = STLVmFlowVar(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])
            dst_fv_ip = STLVmFlowVar(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])

        if stream_cfg['udp_port_step'] == 'random':
            src_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_src_count'])
            dst_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_dst_count'])
        else:
            src_fv_port = STLVmFlowVar(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                op="inc",
                step=udp_args['sport_step'])
            dst_fv_port = STLVmFlowVar(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                op="inc",
                step=udp_args['dport_step'])
        # extend (do not overwrite) vm_param so that the VxLAN outer UDP src port
        # randomization added above is preserved
        vm_param.extend([
            src_fv_ip,
            STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
            src_fv_port,
            STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
            dst_fv_ip,
            STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
            dst_fv_port,
            STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
        ])
        # Use HW offload to calculate the outer IP/UDP checksums
        vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
                                           l4_offset="UDP:0",
                                           l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
        # Use software to fix the inner IP/UDP payload for VxLAN packets
        if int(encap_level):
            vm_param.append(STLVmFixIpv4(offset="IP:1"))
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
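        # For a fixed l2frame this returns 1 continuous stream (pg_id) plus, when
        # latency is True, 1 latency stream at LATENCY_PPS (lat_pg_id).
        # For IMIX it returns 3 continuous streams weighted by IMIX_RATIOS plus the
        # optional latency stream sized at IMIX_AVG_L2_FRAME_SIZE.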
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                                         vxlan=True)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            if latency:
                # for IMIX, the latency packets have the average IMIX packet size
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)

        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                                         mode=STLTXCont()))
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                                                     vxlan=True)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # with vlan it is 68
            # This only applies to the latency stream
            if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
                pkt = self._create_pkt(stream_cfg, 68)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                                                        vxlan=True)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    @timeout(5)
    def __connect(self, client):
        client.connect()

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        for it in range(self.config.generic_retry_count):
            try:
                time.sleep(1)
                self.client.connect()
                break
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                    raise
                LOG.info("Retrying connection to TRex (%s)...", ex)

    def connect(self):
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                self.__start_local_server()
            else:
                raise TrafficGeneratorException(str(e))

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        # Prepare the ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                last_size = 0
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        break
                    last_size = size
                    time.sleep(1)
                with open(logpath, 'r') as f:
                    message = f.read()
            else:
                message = str(e)
            raise TrafficGeneratorException(message)

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            time.sleep(1)
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                if ports:
                    self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP failed to resolve for all IP addresses
                else a dict of list of dest macs indexed by port#
                the dest macs in the list are indexed by the chain id
        """
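        # Shape of a successful return value for 2 ports and 2 chains
        # (MAC values below are hypothetical):
        #   {0: ['fa:16:3e:00:00:01', 'fa:16:3e:00:00:02'],
        #    1: ['fa:16:3e:00:00:03', 'fa:16:3e:00:00:04']}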
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        arp_dest_macs = {}
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            dst_macs_count = 0
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                arps = [
                    ServiceARP(ctx,
                               src_ip=device.vtep_src_ip,
                               dst_ip=device.vtep_dst_ip,
                               vlan=device.vtep_vlan)
                    for cfg in stream_configs
                ]
            else:
                arps = [
                    ServiceARP(ctx,
                               src_ip=cfg['ip_src_tg_gw'],
                               dst_ip=cfg['mac_discovery_gw'],
                               # will be None if no vlan tagging
                               vlan=cfg['vlan_tag'])
                    for cfg in stream_configs
                ]

            for attempt in range(self.config.generic_retry_count):
                try:
                    ctx.run(arps)
                except STLError:
                    LOG.error(traceback.format_exc())
                    continue

                unresolved = []
                for chain_id, mac in enumerate(dst_macs):
                    if not mac:
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            dst_macs_count += 1
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     port, chain_id,
                                     arp_record.src_ip,
                                     arp_record.dst_ip, arp_record.dst_mac)
                        else:
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    break

                retry = attempt + 1
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
            else:
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          port,
                          dst_macs_count,
                          chain_count)
                break

        # if the capture from the TRex console was started before the arp request step,
        # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
        return None

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
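        # Worked example: with LATENCY_PPS=1000, 2 chains and bidirectional traffic
        # (mult=2), the required rate is (1000 + 1) * 2 * 2 = 4004 pps in total.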
        intf_speed = self.generator_config.intf_speed
        if latency:
            if bidirectional:
                mult = 2
                total_rate = 0
                for rate in rates:
                    r = utils.convert_rates(l2frame_size, rate, intf_speed)
                    total_rate += int(r['rate_pps'])
            else:
                mult = 1
                r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
                total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
                                         intf_speed * mult)
            result['result'] = total_rate >= required_rate
            return result

        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required total rate must be at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
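        # e.g. with 2 chains: 8 streams total for a fixed frame size
        # (2 chains x 2 directions x 2 streams) and 16 streams for IMIX
        # (2 chains x 2 directions x 4 streams)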
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                        chain_id,
                                                        fwd_stream_cfg,
                                                        l2frame_size,
                                                        latency=latency,
                                                        e2e=e2e))
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            chain_id,
                                                            rev_stream_cfg,
                                                            l2frame_size,
                                                            latency=bidirectional and latency,
                                                            e2e=e2e))

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.rates = []
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speeds in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        if self.capture_id:
            self.stop_capture()
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture(rx_ports=self.port_handle,
                                                    bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        if self.capture_id:
            self.packet_list = []
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        if self.capture_id:
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # if the capture from TRex console was started before the connectivity step,
            # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service_mode'."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)