NFVBENCH-158 Allow multiple UDP ports in traffic generation
nfvbench/traffic_gen/trex_gen.py
# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Driver module for TRex traffic generator."""

import math
import os
import random
import time
import traceback

from itertools import count
# pylint: disable=import-error
from scapy.contrib.mpls import MPLS  # flake8: noqa
# pylint: enable=import-error
from nfvbench.log import LOG
from nfvbench.traffic_server import TRexTrafficServer
from nfvbench.utils import cast_integer
from nfvbench.utils import timeout
from nfvbench.utils import TimeoutError

# pylint: disable=import-error
from trex.common.services.trex_service_arp import ServiceARP
from trex.stl.api import bind_layers
from trex.stl.api import CTRexVmInsFixHwCs
from trex.stl.api import Dot1Q
from trex.stl.api import Ether
from trex.stl.api import FlagsField
from trex.stl.api import IP
from trex.stl.api import Packet
from trex.stl.api import STLClient
from trex.stl.api import STLError
from trex.stl.api import STLFlowLatencyStats
from trex.stl.api import STLFlowStats
from trex.stl.api import STLPktBuilder
from trex.stl.api import STLScVmRaw
from trex.stl.api import STLStream
from trex.stl.api import STLTXCont
from trex.stl.api import STLVmFixChecksumHw
from trex.stl.api import STLVmFlowVar
from trex.stl.api import STLVmFlowVarRepeatableRandom
from trex.stl.api import STLVmWrFlowVar
from trex.stl.api import ThreeBytesField
from trex.stl.api import UDP
from trex.stl.api import XByteField

# pylint: enable=import-error

from .traffic_base import AbstractTrafficGenerator
from .traffic_base import TrafficGeneratorException
from . import traffic_utils as utils
from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
from .traffic_utils import IMIX_L2_SIZES
from .traffic_utils import IMIX_RATIOS


class VXLAN(Packet):
    """VxLAN class."""

    # one flag name per bit: bits 0-26 and 28-31 are reserved, bit 27 is the 'I' flag
    _VXLAN_FLAGS = ['R'] * 27 + ['I'] + ['R'] * 4
    name = "VXLAN"
    fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
                   ThreeBytesField("vni", 0),
                   XByteField("reserved", 0x00)]

    def mysummary(self):
        """Summary."""
        return self.sprintf("VXLAN (vni=%VXLAN.vni%)")


class TRex(AbstractTrafficGenerator):
    """TRex traffic generator driver."""

    LATENCY_PPS = 1000
    CHAIN_PG_ID_MASK = 0x007F
    PORT_PG_ID_MASK = 0x0080
    LATENCY_PG_ID_MASK = 0x0100

    def __init__(self, traffic_client):
        """Trex driver."""
        AbstractTrafficGenerator.__init__(self, traffic_client)
        self.client = None
        self.id = count()
        self.port_handle = []
        self.chain_count = self.generator_config.service_chain_count
        self.rates = []
        self.capture_id = None
        self.packet_list = []

    def get_version(self):
        """Get the Trex version."""
        return self.client.get_server_version() if self.client else ''

    def get_pg_id(self, port, chain_id):
        """Calculate the packet group IDs to use for a given port/stream type/chain_id.

        port: 0 or 1
        chain_id: identifies to which chain the pg_id is associated (0 to 127)
        return: pg_id, lat_pg_id

        We use a bit mask to set up the 3 fields:
        0x007F: chain ID (7 bits for a max of 128 chains)
        0x0080: port bit
        0x0100: latency bit
        """
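        # Illustrative example (hypothetical values): for port=1 and chain_id=3,
        # pg_id     = 1 * 0x0080 | 3 = 0x0083 (131)
        # lat_pg_id = 0x0083 | 0x0100 = 0x0183 (387)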
        pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
        return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK

    def extract_stats(self, in_stats):
        """Extract stats from dict returned by Trex API.

        :param in_stats: dict as returned by TRex api
        """
        utils.nan_replace(in_stats)
        # LOG.debug(in_stats)

        result = {}
        # port_handles should have only 2 elements: [0, 1]
        # so (1 - ph) will be the index for the far end port
        for ph in self.port_handle:
            stats = in_stats[ph]
            far_end_stats = in_stats[1 - ph]
            result[ph] = {
                'tx': {
                    'total_pkts': cast_integer(stats['opackets']),
                    'total_pkt_bytes': cast_integer(stats['obytes']),
                    'pkt_rate': cast_integer(stats['tx_pps']),
                    'pkt_bit_rate': cast_integer(stats['tx_bps'])
                },
                'rx': {
                    'total_pkts': cast_integer(stats['ipackets']),
                    'total_pkt_bytes': cast_integer(stats['ibytes']),
                    'pkt_rate': cast_integer(stats['rx_pps']),
                    'pkt_bit_rate': cast_integer(stats['rx_bps']),
                    # how many pkts were dropped in RX direction
                    # need to take the tx counter on the far end port
                    'dropped_pkts': cast_integer(
                        far_end_stats['opackets'] - stats['ipackets'])
                }
            }
            self.__combine_latencies(in_stats, result[ph]['rx'], ph)

        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]
        return result

    def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
        """Extract the aggregated stats for a given chain.

        trex_stats: stats as returned by get_stats()
        if_stats: a list of 2 interface stats to update (port 0 and 1)
        latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
                   latencies[p] is the latency for packets sent on port p
                   if there are no latency streams, the Latency instances are not modified
        chain_idx: chain index of the interface stats

        The packet counts include normal and latency streams.

        Trex returns flow stats as follows:

        'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                           'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'rx_bytes': {0: nan, 1: nan, 'total': nan},
                           'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
                           'rx_pps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps': {0: 0, 1: 0, 'total': 0},
                           'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                           'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
                           'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
                           'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
                             'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'rx_bytes': {0: nan, 1: nan, 'total': nan},
                             'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
                             'rx_pps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps': {0: 0, 1: 0, 'total': 0},
                             'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
                             'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
                             'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
                             'tx_pps': {0: 0, 1: 0, 'total': 0}},
                       etc...

        the pg_id (0, 1, 128, ...) is the key of the dict and is obtained using the
        get_pg_id() method.
        packet counters for a given stream sent on port p are reported as:
        - tx_pkts[p] on port p
        - rx_pkts[1-p] on the far end port

        This is a tricky/critical counter transposition operation because
        the results are grouped by port (not by stream):
        tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
        rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
        tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
        rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]

        or using a more generic formula:
        tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
        rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]

        the second formula is equivalent to
        rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]

        If there are latency streams, those same counters need to be added in the same way
        """
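        # Example of the transposition above (hypothetical counters): with chain_idx=0,
        # pg_id(port=0)=0 and pg_id(port=1)=128; if flow_stats[0]['tx_pkts'][0] == 15001
        # and flow_stats[128]['rx_pkts'][0] == 15001, then if_stats[0].tx and
        # if_stats[0].rx both end up at 15001 (plus the latency stream counters).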
        def get_latency(lval):
            try:
                return int(round(lval))
            except ValueError:
                return 0

        for ifs in if_stats:
            ifs.tx = ifs.rx = 0
        for port in range(2):
            pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
            for pid in [pg_id, lat_pg_id]:
                try:
                    pg_stats = trex_stats['flow_stats'][pid]
                    if_stats[port].tx += pg_stats['tx_pkts'][port]
                    if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
                except KeyError:
                    pass
            try:
                lat = trex_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                latencies[port].max_usec = get_latency(lat['total_max'])
                if math.isnan(lat['total_min']):
                    latencies[port].min_usec = 0
                    latencies[port].avg_usec = 0
                else:
                    latencies[port].min_usec = get_latency(lat['total_min'])
                    latencies[port].avg_usec = get_latency(lat['average'])
                # pick up the HDR histogram if present (otherwise will raise KeyError)
                latencies[port].hdrh = lat['hdrh']
            except KeyError:
                pass

    def __combine_latencies(self, in_stats, results, port_handle):
        """Traverse the TRex result dictionary and combine the chosen latency stats.

        Example of latency dict returned by trex (2 chains):
        'latency': {256: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 26.5,
                                      'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
                                      'histogram': {20: 303,
                                                    30: 320,
                                                    40: 300,
                                                    50: 73,
                                                    60: 4,
                                                    70: 1},
                                      'jitter': 14,
                                      'last_max': 63,
                                      'total_max': 63,
                                      'total_min': 20}},
                    257: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 29.75,
                                      'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
                                      'histogram': {20: 261,
                                                    30: 431,
                                                    40: 3,
                                                    50: 80,
                                                    60: 225},
                                      'jitter': 23,
                                      'last_max': 67,
                                      'total_max': 67,
                                      'total_min': 20}},
                    384: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 18.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
                                      'histogram': {20: 987, 30: 14},
                                      'jitter': 0,
                                      'last_max': 34,
                                      'total_max': 34,
                                      'total_min': 20}},
                    385: {'err_cntrs': {'dropped': 0,
                                        'dup': 0,
                                        'out_of_order': 0,
                                        'seq_too_high': 0,
                                        'seq_too_low': 0},
                          'latency': {'average': 19.0,
                                      'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
                                      'histogram': {20: 989, 30: 11},
                                      'jitter': 0,
                                      'last_max': 38,
                                      'total_max': 38,
                                      'total_min': 20}},
                    'global': {'bad_hdr': 0, 'old_flow': 0}}
        """
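        # Sketch of the aggregation below, assuming 2 chains on port 0: the latency
        # pg_ids are 0x100 and 0x101 (256 and 257 as in the example above); the port
        # result is the min/max over both chains and the mean of their 'average' values.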
        total_max = 0
        average = 0
        total_min = float("inf")
        for chain_id in range(self.chain_count):
            try:
                _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
                lat = in_stats['latency'][lat_pg_id]['latency']
                # dropped_pkts += lat['err_cntrs']['dropped']
                total_max = max(lat['total_max'], total_max)
                total_min = min(lat['total_min'], total_min)
                average += lat['average']
            except KeyError:
                pass
        if total_min == float("inf"):
            total_min = 0
        results['min_delay_usec'] = total_min
        results['max_delay_usec'] = total_max
        results['avg_delay_usec'] = int(average / self.chain_count)

    def _bind_vxlan(self):
        bind_layers(UDP, VXLAN, dport=4789)
        bind_layers(VXLAN, Ether)

    def _create_pkt(self, stream_cfg, l2frame_size):
        """Create a packet of given size.

        l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
        """
        # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
        frame_size = int(l2frame_size) - 4
        vm_param = []
        if stream_cfg['vxlan'] is True:
            self._bind_vxlan()
            encap_level = '1'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
            pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
            pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
            # need to randomize the outer header UDP src port based on flow
            vxlan_udp_src_fv = STLVmFlowVar(
                name="vxlan_udp_src",
                min_value=1337,
                max_value=32767,
                size=2,
                op="random")
            vm_param = [vxlan_udp_src_fv,
                        STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
        elif stream_cfg['mpls'] is True:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
            if stream_cfg['vtep_vlan'] is not None:
                pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
            if stream_cfg['mpls_outer_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
            if stream_cfg['mpls_inner_label'] is not None:
                pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
            #  Flow stats and MPLS labels randomization TBD
            pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
        else:
            encap_level = '0'
            pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])

        if stream_cfg['vlan_tag'] is not None:
            pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])

        udp_args = {}
        # a numeric step is only needed for the incremental case; with
        # udp_port_step == 'random' the ports are drawn by a random flow var below
        port_step = (1 if stream_cfg['udp_port_step'] == 'random'
                     else int(stream_cfg['udp_port_step']))
        if stream_cfg['udp_src_port']:
            udp_args['sport'] = int(stream_cfg['udp_src_port'])
            udp_args['sport_step'] = port_step
            udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
        if stream_cfg['udp_dst_port']:
            udp_args['dport'] = int(stream_cfg['udp_dst_port'])
            udp_args['dport_step'] = port_step
            udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])

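        # Example of the resulting UDP port ranges (hypothetical config):
        # udp_src_port=53, udp_src_port_max=62 and udp_port_step=1 yield 10 source
        # ports (53..62); with udp_port_step='random', ports are drawn at random
        # from the same range instead of being incremented.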
        pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
                    UDP(dport=udp_args['dport'], sport=udp_args['sport'])
        if stream_cfg['ip_src_static'] is True:
            src_max_ip_value = stream_cfg['ip_src_addr']
        else:
            src_max_ip_value = stream_cfg['ip_src_addr_max']
        if stream_cfg['ip_addrs_step'] == 'random':
            src_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_src_count'])
            dst_fv_ip = STLVmFlowVarRepeatableRandom(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                seed=random.randint(0, 32767),
                limit=stream_cfg['ip_dst_count'])
        else:
            src_fv_ip = STLVmFlowVar(
                name="ip_src",
                min_value=stream_cfg['ip_src_addr'],
                max_value=src_max_ip_value,
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])
            dst_fv_ip = STLVmFlowVar(
                name="ip_dst",
                min_value=stream_cfg['ip_dst_addr'],
                max_value=stream_cfg['ip_dst_addr_max'],
                size=4,
                op="inc",
                step=stream_cfg['ip_addrs_step'])

        if stream_cfg['udp_port_step'] == 'random':
            src_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_src_count'])
            dst_fv_port = STLVmFlowVarRepeatableRandom(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                seed=random.randint(0, 32767),
                limit=stream_cfg['udp_dst_count'])
        else:
            src_fv_port = STLVmFlowVar(
                name="p_src",
                min_value=udp_args['sport'],
                max_value=udp_args['sport_max'],
                size=2,
                op="inc",
                step=udp_args['sport_step'])
            dst_fv_port = STLVmFlowVar(
                name="p_dst",
                min_value=udp_args['dport'],
                max_value=udp_args['dport_max'],
                size=2,
                op="inc",
                step=udp_args['dport_step'])
        # extend (rather than overwrite) vm_param so the VXLAN outer UDP src port
        # flow var set up above is preserved
        vm_param += [
            src_fv_ip,
            STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
            src_fv_port,
            STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
            dst_fv_ip,
            STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
            dst_fv_port,
            STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
            STLVmFixChecksumHw(l3_offset="IP:{}".format(encap_level),
                               l4_offset="UDP:{}".format(encap_level),
                               l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
        ]
        pad = max(0, frame_size - len(pkt_base)) * 'x'

        return STLPktBuilder(pkt=pkt_base / pad,
                             vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))

    def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
                         e2e=False):
        """Create a list of streams corresponding to a given chain and stream config.

        port: port where the streams originate (0 or 1)
        chain_id: the chain to which the streams are associated
        stream_cfg: stream configuration
        l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
        latency: if True also create a latency stream
        e2e: True if performing "end to end" connectivity check
        """
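        # Resulting stream count per call: for a fixed frame size, 1 data stream
        # (plus 1 latency stream if latency is True); for IMIX, 3 data streams
        # (one per IMIX frame size) plus the optional latency stream at the
        # average IMIX frame size.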
        streams = []
        pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
        if self.config.no_flow_stats:
            LOG.info("Traffic flow statistics are disabled.")
        if l2frame == 'IMIX':
            for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
                pkt = self._create_pkt(stream_cfg, l2_frame_size)
                if e2e or stream_cfg['mpls']:
                    streams.append(STLStream(packet=pkt,
                                             mode=STLTXCont(pps=ratio)))
                else:
                    if stream_cfg['vxlan'] is True:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id,
                                                                         vxlan=True)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))
                    else:
                        streams.append(STLStream(packet=pkt,
                                                 flow_stats=STLFlowStats(pg_id=pg_id)
                                                 if not self.config.no_flow_stats else None,
                                                 mode=STLTXCont(pps=ratio)))

            if latency:
                # for IMIX, the latency packets have the average IMIX packet size
                pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)

        else:
            l2frame_size = int(l2frame)
            pkt = self._create_pkt(stream_cfg, l2frame_size)
            if e2e or stream_cfg['mpls']:
                streams.append(STLStream(packet=pkt,
                                         # Flow stats is disabled for MPLS now
                                         # flow_stats=STLFlowStats(pg_id=pg_id),
                                         mode=STLTXCont()))
            else:
                if stream_cfg['vxlan'] is True:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id,
                                                                     vxlan=True)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
                else:
                    streams.append(STLStream(packet=pkt,
                                             flow_stats=STLFlowStats(pg_id=pg_id)
                                             if not self.config.no_flow_stats else None,
                                             mode=STLTXCont()))
            # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
            # without vlan, the min l2 frame size is 64
            # with vlan it is 68
            # This only applies to the latency stream
            if latency and stream_cfg['vlan_tag'] and l2frame_size < 68:
                pkt = self._create_pkt(stream_cfg, 68)

        if latency:
            if self.config.no_latency_stats:
                LOG.info("Latency flow statistics are disabled.")
            if stream_cfg['vxlan'] is True:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
                                                                        vxlan=True)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
            else:
                streams.append(STLStream(packet=pkt,
                                         flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
                                         if not self.config.no_latency_stats else None,
                                         mode=STLTXCont(pps=self.LATENCY_PPS)))
        return streams

    @timeout(5)
    def __connect(self, client):
        client.connect()

    def __connect_after_start(self):
        # after start, Trex may take a bit of time to initialize
        # so we need to retry a few times
        for it in range(self.config.generic_retry_count):
            try:
                time.sleep(1)
                self.client.connect()
                break
            except Exception as ex:
                if it == (self.config.generic_retry_count - 1):
                    raise
                LOG.info("Retrying connection to TRex (%s)...", ex.msg)

    def connect(self):
        """Connect to the TRex server."""
        server_ip = self.generator_config.ip
        LOG.info("Connecting to TRex (%s)...", server_ip)

        # Connect to TRex server
        self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
                                async_port=self.generator_config.zmq_pub_port)
        try:
            self.__connect(self.client)
            if server_ip == '127.0.0.1':
                config_updated = self.__check_config()
                if config_updated or self.config.restart:
                    self.__restart()
        except (TimeoutError, STLError) as e:
            if server_ip == '127.0.0.1':
                self.__start_local_server()
            else:
                raise TrafficGeneratorException(e.message)

        ports = list(self.generator_config.ports)
        self.port_handle = ports
        # Prepare the ports
        self.client.reset(ports)
        # Read HW information from each port
        # this returns an array of dict (1 per port)
        """
        Example of output for Intel XL710
        [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
          'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
          u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
          u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
          u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
          'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
          u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
          'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
          'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
          'layer_mode': 'Ethernet', u'numa': 0}, ...]
        """
        self.port_info = self.client.get_port_info(ports)
        LOG.info('Connected to TRex')
        for id, port in enumerate(self.port_info):
            LOG.info('   Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
                     id, port['description'], port['speed'], port['src_mac'],
                     port['pci_addr'], port['driver'])
        # Make sure the 2 ports have the same speed
        if self.port_info[0]['speed'] != self.port_info[1]['speed']:
            raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
                                            (self.port_info[0]['speed'],
                                             self.port_info[1]['speed']))

    def __start_local_server(self):
        try:
            LOG.info("Starting TRex ...")
            self.__start_server()
            self.__connect_after_start()
        except (TimeoutError, STLError) as e:
            LOG.error('Cannot connect to TRex')
            LOG.error(traceback.format_exc())
            logpath = '/tmp/trex.log'
            if os.path.isfile(logpath):
                # Wait for TRex to finish writing error message
                last_size = 0
                for _ in range(self.config.generic_retry_count):
                    size = os.path.getsize(logpath)
                    if size == last_size:
                        # probably not writing anymore
                        break
                    last_size = size
                    time.sleep(1)
                with open(logpath, 'r') as f:
                    message = f.read()
            else:
                message = e.message
            raise TrafficGeneratorException(message)

    def __start_server(self):
        server = TRexTrafficServer()
        server.run_server(self.generator_config)

    def __check_config(self):
        server = TRexTrafficServer()
        return server.check_config_updated(self.generator_config)

    def __restart(self):
        LOG.info("Restarting TRex ...")
        self.__stop_server()
        # Wait for server stopped
        for _ in range(self.config.generic_retry_count):
            time.sleep(1)
            if not self.client.is_connected():
                LOG.info("TRex is stopped...")
                break
        self.__start_local_server()

    def __stop_server(self):
        if self.generator_config.ip == '127.0.0.1':
            ports = self.client.get_acquired_ports()
            LOG.info('Release ports %s and stopping TRex...', ports)
            try:
                if ports:
                    self.client.release(ports=ports)
                self.client.server_shutdown()
            except STLError as e:
                LOG.warning('Unable to stop TRex. Error: %s', e)
        else:
            LOG.info('Using remote TRex. Unable to stop TRex')

    def resolve_arp(self):
        """Resolve all configured remote IP addresses.

        return: None if ARP resolution failed for any of the IP addresses,
                else a dict of lists of dest macs indexed by port#
                the dest macs in the list are indexed by the chain id
        """
        self.client.set_service_mode(ports=self.port_handle)
        LOG.info('Polling ARP until successful...')
        arp_dest_macs = {}
        for port, device in zip(self.port_handle, self.generator_config.devices):
            # there should be 1 stream config per chain
            stream_configs = device.get_stream_configs()
            chain_count = len(stream_configs)
            ctx = self.client.create_service_ctx(port=port)
            # all dest macs on this port indexed by chain ID
            dst_macs = [None] * chain_count
            dst_macs_count = 0
            # the index in the list is the chain id
            if self.config.vxlan or self.config.mpls:
                arps = [
                    ServiceARP(ctx,
                               src_ip=device.vtep_src_ip,
                               dst_ip=device.vtep_dst_ip,
                               vlan=device.vtep_vlan)
                    for cfg in stream_configs
                ]
            else:
                arps = [
                    ServiceARP(ctx,
                               src_ip=cfg['ip_src_tg_gw'],
                               dst_ip=cfg['mac_discovery_gw'],
                               # will be None if no vlan tagging
                               vlan=cfg['vlan_tag'])
                    for cfg in stream_configs
                ]

            for attempt in range(self.config.generic_retry_count):
                try:
                    ctx.run(arps)
                except STLError:
                    LOG.error(traceback.format_exc())
                    continue

                unresolved = []
                for chain_id, mac in enumerate(dst_macs):
                    if not mac:
                        arp_record = arps[chain_id].get_record()
                        if arp_record.dst_mac:
                            dst_macs[chain_id] = arp_record.dst_mac
                            dst_macs_count += 1
                            LOG.info('   ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
                                     port, chain_id,
                                     arp_record.src_ip,
                                     arp_record.dst_ip, arp_record.dst_mac)
                        else:
                            unresolved.append(arp_record.dst_ip)
                if dst_macs_count == chain_count:
                    arp_dest_macs[port] = dst_macs
                    LOG.info('ARP resolved successfully for port %s', port)
                    break

                retry = attempt + 1
                LOG.info('Retrying ARP for: %s (retry %d/%d)',
                         unresolved, retry, self.config.generic_retry_count)
                if retry < self.config.generic_retry_count:
                    time.sleep(self.config.generic_poll_sec)
            else:
                LOG.error('ARP timed out for port %s (resolved %d out of %d)',
                          port,
                          dst_macs_count,
                          chain_count)
                break

        # if the capture from the TRex console was started before the arp request step,
        # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
        if not self.config.service_mode:
            self.client.set_service_mode(ports=self.port_handle, enabled=False)
        if len(arp_dest_macs) == len(self.port_handle):
            return arp_dest_macs
        return None

    def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
        """Check if rate provided by user is above requirements. Applies only if latency is True."""
        intf_speed = self.generator_config.intf_speed
        if latency:
            if bidirectional:
                mult = 2
                total_rate = 0
                for rate in rates:
                    r = utils.convert_rates(l2frame_size, rate, intf_speed)
                    total_rate += int(r['rate_pps'])
            else:
                mult = 1
                r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
                total_rate = int(r['rate_pps'])
            # rate must be enough for latency stream and at least 1 pps for base stream per chain
            required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
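            # Worked example (hypothetical run): with LATENCY_PPS=1000, 2 chains and
            # bidirectional traffic (mult=2), required_rate = (1000 + 1) * 2 * 2 = 4004 pps
            # in total for both directions.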
            result = utils.convert_rates(l2frame_size,
                                         {'rate_pps': required_rate},
                                         intf_speed * mult)
            result['result'] = total_rate >= required_rate
            return result

        return {'result': True}

    def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
        """Program all the streams in Trex server.

        l2frame_size: L2 frame size or IMIX
        rates: a list of 2 rates to run each direction
               each rate is a dict like {'rate_pps': '10kpps'}
        bidirectional: True if bidirectional
        latency: True if latency measurement is needed
        e2e: True if performing "end to end" connectivity check
        """
        r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
        if not r['result']:
            raise TrafficGeneratorException(
                'Required minimum total rate is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
                .format(pps=r['rate_pps'],
                        bps=r['rate_bps'],
                        load=r['rate_percent']))
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
        # for IMIX, has self.chain_count * 2 * 4 streams
        # (3 normal + 1 latency stream per direction per chain)
        streamblock = {}
        for port in self.port_handle:
            streamblock[port] = []
        stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
        self.rates = [utils.to_rate_str(rate) for rate in rates]
        for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
            streamblock[0].extend(self.generate_streams(self.port_handle[0],
                                                        chain_id,
                                                        fwd_stream_cfg,
                                                        l2frame_size,
                                                        latency=latency,
                                                        e2e=e2e))
            if len(self.rates) > 1:
                streamblock[1].extend(self.generate_streams(self.port_handle[1],
                                                            chain_id,
                                                            rev_stream_cfg,
                                                            l2frame_size,
                                                            latency=bidirectional and latency,
                                                            e2e=e2e))

        for port in self.port_handle:
            if self.config.vxlan:
                self.client.set_port_attr(ports=port, vxlan_fs=[4789])
            else:
                self.client.set_port_attr(ports=port, vxlan_fs=None)
            self.client.add_streams(streamblock[port], ports=port)
            LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)

    def clear_streamblock(self):
        """Clear all streams from TRex."""
        self.rates = []
        self.client.reset(self.port_handle)
        LOG.info('Cleared all existing streams')

    def get_stats(self):
        """Get stats from Trex."""
        stats = self.client.get_stats()
        return self.extract_stats(stats)

    def get_macs(self):
        """Return the Trex local port MAC addresses.

        return: a list of MAC addresses indexed by the port#
        """
        return [port['src_mac'] for port in self.port_info]

    def get_port_speed_gbps(self):
        """Return the Trex local port speeds.

        return: a list of speed in Gbps indexed by the port#
        """
        return [port['speed'] for port in self.port_info]

    def clear_stats(self):
        """Clear all stats in the traffic generator."""
        if self.port_handle:
            self.client.clear_stats()

    def start_traffic(self):
        """Start generating traffic in all ports."""
        for port, rate in zip(self.port_handle, self.rates):
            self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)

    def stop_traffic(self):
        """Stop generating traffic."""
        self.client.stop(ports=self.port_handle)

    def start_capture(self):
        """Capture all packets on both ports that are unicast to us."""
        if self.capture_id:
            self.stop_capture()
        # Need to filter out unwanted packets so we do not end up counting
        # src MACs of frames that are not unicast to us
        src_mac_list = self.get_macs()
        bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
        # ports must be set in service in order to enable capture
        self.client.set_service_mode(ports=self.port_handle)
        self.capture_id = self.client.start_capture(
            rx_ports=self.port_handle, bpf_filter=bpf_filter)

    def fetch_capture_packets(self):
        """Fetch capture packets in capture mode."""
        if self.capture_id:
            self.packet_list = []
            self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
                                              output=self.packet_list)

    def stop_capture(self):
        """Stop capturing packets."""
        if self.capture_id:
            self.client.stop_capture(capture_id=self.capture_id['id'])
            self.capture_id = None
            # if the capture from TRex console was started before the connectivity step,
            # it keeps 'service_mode' enabled, otherwise, it disables the 'service_mode'
            if not self.config.service_mode:
                self.client.set_service_mode(ports=self.port_handle, enabled=False)

    def cleanup(self):
        """Cleanup Trex driver."""
        if self.client:
            try:
                self.client.reset(self.port_handle)
                self.client.disconnect()
            except STLError:
                # TRex does not like a reset while in disconnected state
                pass

    def set_service_mode(self, enabled=True):
        """Enable/disable the 'service_mode'."""
        self.client.set_service_mode(ports=self.port_handle, enabled=enabled)