add yardstick iruya 9.0.0 release notes
[yardstick.git] / yardstick / network_services / traffic_profile / vpp_rfc2544.py
1 # Copyright (c) 2019 Viosoft Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import datetime
16 import ipaddress
17 import logging
18 import random
19 import string
20
21 from trex_stl_lib import api as Pkt
22 from trex_stl_lib import trex_stl_client
23 from trex_stl_lib import trex_stl_packet_builder_scapy
24 from trex_stl_lib import trex_stl_streams
25
26 from yardstick.common import constants
27 from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \
28     MultipleLossRatioSearch
29 from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \
30     PortPgIDMap
31 from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \
32     DST
33
34 LOGGING = logging.getLogger(__name__)
35
36
37 class VppRFC2544Profile(RFC2544Profile):
38
39     def __init__(self, traffic_generator):
40         super(VppRFC2544Profile, self).__init__(traffic_generator)
41
42         tp_cfg = traffic_generator["traffic_profile"]
43         self.number_of_intermediate_phases = tp_cfg.get("intermediate_phases",
44                                                         2)
45
46         self.duration = self.config.duration
47         self.precision = self.config.test_precision
48         self.lower_bound = self.config.lower_bound
49         self.upper_bound = self.config.upper_bound
50         self.step_interval = self.config.step_interval
51         self.enable_latency = self.config.enable_latency
52
53         self.pkt_size = None
54         self.flow = None
55
56         self.tolerance_low = 0
57         self.tolerance_high = 0
58
59         self.queue = None
60         self.port_pg_id = None
61
62         self.current_lower = self.lower_bound
63         self.current_upper = self.upper_bound
64
65         self.ports = []
66         self.profiles = {}
67
68     @property
69     def delta(self):
70         return self.current_upper - self.current_lower
71
72     @property
73     def mid_point(self):
74         return (self.current_lower + self.current_upper) / 2
75
76     @staticmethod
77     def calculate_frame_size(imix):
78         if not imix:
79             return 64, 100
80
81         imix_count = {size.upper().replace('B', ''): int(weight)
82                       for size, weight in imix.items()}
83         imix_sum = sum(imix_count.values())
84         if imix_sum <= 0:
85             return 64, 100
86         packets_total = sum([int(size) * weight
87                              for size, weight in imix_count.items()])
88         return packets_total / imix_sum, imix_sum
89
90     @staticmethod
91     def _gen_payload(length):
92         payload = ""
93         for _ in range(length):
94             payload += random.choice(string.ascii_letters)
95
96         return payload
97
98     def bounds_iterator(self, logger=None):
99         self.current_lower = self.lower_bound
100         self.current_upper = self.upper_bound
101
102         test_value = self.current_upper
103         while abs(self.delta) >= self.precision:
104             if logger:
105                 logger.debug("New interval [%s, %s), precision: %d",
106                              self.current_lower,
107                              self.current_upper, self.step_interval)
108                 logger.info("Testing with value %s", test_value)
109
110             yield test_value
111             test_value = self.mid_point
112
113     def register_generator(self, generator):
114         super(VppRFC2544Profile, self).register_generator(generator)
115         self.init_traffic_params(generator)
116
117     def init_queue(self, queue):
118         self.queue = queue
119         self.queue.cancel_join_thread()
120
121     def init_traffic_params(self, generator):
122         if generator.rfc2544_helper.latency:
123             self.enable_latency = True
124         self.tolerance_low = generator.rfc2544_helper.tolerance_low
125         self.tolerance_high = generator.rfc2544_helper.tolerance_high
126         self.max_rate = generator.scenario_helper.all_options.get('vpp_config',
127                                                                   {}).get(
128             'max_rate', self.rate)
129
130     def create_profile(self, profile_data, current_port):
131         streams = []
132         for packet_name in profile_data:
133             imix = (profile_data[packet_name].
134                     get('outer_l2', {}).get('framesize'))
135             self.pkt_size, imix_sum = self.calculate_frame_size(imix)
136             self._create_vm(profile_data[packet_name])
137             if self.max_rate > 100:
138                 imix_data = self._create_imix_data(imix,
139                                                    constants.DISTRIBUTION_IN_PACKETS)
140             else:
141                 imix_data = self._create_imix_data(imix)
142             _streams = self._create_single_stream(current_port, imix_data,
143                                                   imix_sum)
144             streams.extend(_streams)
145         return trex_stl_streams.STLProfile(streams)
146
147     def _set_outer_l3v4_fields(self, outer_l3v4):
148         """ setup outer l3v4 fields from traffic profile """
149         ip_params = {}
150         if 'proto' in outer_l3v4:
151             ip_params['proto'] = outer_l3v4['proto']
152         self._set_proto_fields(IP, **ip_params)
153
154         self.flow = int(outer_l3v4['count'])
155         src_start_ip, _ = outer_l3v4['srcip4'].split('-')
156         dst_start_ip, _ = outer_l3v4['dstip4'].split('-')
157
158         self.ip_packet = Pkt.IP(src=src_start_ip,
159                                 dst=dst_start_ip,
160                                 proto=outer_l3v4['proto'])
161         if self.flow > 1:
162             dst_start_int = int(ipaddress.ip_address(str(dst_start_ip)))
163             dst_end_ip_new = ipaddress.ip_address(
164                 dst_start_int + self.flow - 1)
165             # self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count'])
166             self._set_proto_addr(IP, DST,
167                                  "{start_ip}-{end_ip}".format(
168                                      start_ip=dst_start_ip,
169                                      end_ip=str(dst_end_ip_new)),
170                                  self.flow)
171
172     def _create_single_packet(self, size=64):
173         ether_packet = self.ether_packet
174         ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
175         base_pkt = ether_packet / ip_packet
176         payload_len = max(0, size - len(base_pkt) - 4)
177         packet = trex_stl_packet_builder_scapy.STLPktBuilder(
178             pkt=base_pkt / self._gen_payload(payload_len),
179             vm=self.trex_vm)
180         packet_lat = trex_stl_packet_builder_scapy.STLPktBuilder(
181             pkt=base_pkt / self._gen_payload(payload_len))
182
183         return packet, packet_lat
184
    def _create_single_stream(self, current_port, imix_data, imix_sum,
                              isg=0.0):
        """Build STL streams for one port from an IMIX weight table.

        For every IMIX entry with a positive weight one data stream is
        created; when latency measurement is enabled a paired latency
        stream (with flow stats and its own pg_id) is appended as well.

        :param current_port: TG port index the streams are built for
        :param imix_data: mapping of frame size -> weight
        :param imix_sum: total weight (pps base when max_rate > 100)
        :param isg: initial inter-stream gap, mutated below for port 1
        :return: list of STLStream objects
        """
        streams = []
        for size, weight in ((int(size), float(weight)) for (size, weight)
                             in imix_data.items() if float(weight) > 0):
            if current_port == 1:
                # NOTE(review): isg grows by 10 for *each* stream on
                # port 1, staggering their start — confirm intended.
                isg += 10.0
            if self.max_rate > 100:
                # Absolute rate mode: weight is a share of imix_sum pps.
                mode = trex_stl_streams.STLTXCont(
                    pps=int(weight * imix_sum / 100))
                mode_lat = mode
            else:
                # Percentage-of-line-rate mode; latency streams run at a
                # fixed 9000 pps regardless of the data rate.
                mode = trex_stl_streams.STLTXCont(
                    percentage=weight * self.max_rate / 100)
                mode_lat = trex_stl_streams.STLTXCont(pps=9000)

            packet, packet_lat = self._create_single_packet(size)
            streams.append(
                trex_stl_client.STLStream(isg=isg, packet=packet, mode=mode))
            if self.enable_latency:
                # Allocate a fresh packet-group id per latency stream.
                pg_id = self.port_pg_id.increase_pg_id(current_port)
                stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
                stream_lat = trex_stl_client.STLStream(isg=isg,
                                                       packet=packet_lat,
                                                       mode=mode_lat,
                                                       flow_stats=stl_flow)
                streams.append(stream_lat)
        return streams
213
    def execute_traffic(self, traffic_generator=None):
        """Install STL profiles on every relevant TG port and run a search.

        Builds one profile per port from ``self.params`` (skipping
        downlink VLDs when correlated traffic is on), then dispatches to
        the optimized MLR search when ``max_rate`` is an absolute pps
        value (> 100) or to the plain binary search otherwise.

        :param traffic_generator: optional TG instance; only adopted if
            no generator has been registered yet.
        """
        if traffic_generator is not None and self.generator is None:
            self.generator = traffic_generator

        # Reset per-run port/profile bookkeeping.
        self.ports = []
        self.profiles = {}
        self.port_pg_id = PortPgIDMap()
        for vld_id, intfs in sorted(self.generator.networks.items()):
            profile_data = self.params.get(vld_id)
            if not profile_data:
                continue
            # With correlated traffic the downlink side is driven by the
            # peer, so no streams are installed for it here.
            if (vld_id.startswith(self.DOWNLINK) and
                    self.generator.rfc2544_helper.correlated_traffic):
                continue
            for intf in intfs:
                current_port = int(self.generator.port_num(intf))
                self.port_pg_id.add_port(current_port)
                profile = self.create_profile(profile_data, current_port)
                self.generator.client.add_streams(profile,
                                                  ports=[current_port])

                self.ports.append(current_port)
                self.profiles[current_port] = profile

        # Overall search budget comes from the runner duration.
        timeout = self.generator.scenario_helper.scenario_cfg["runner"][
            "duration"]
        # Static result metadata reported alongside every sample.
        test_data = {
            "test_duration": timeout,
            "test_precision": self.precision,
            "tolerated_loss": self.tolerance_high,
            "duration": self.duration,
            "packet_size": self.pkt_size,
            "flow": self.flow
        }

        if self.max_rate > 100:
            self.binary_search_with_optimized(self.generator, self.duration,
                                              timeout, test_data)
        else:
            self.binary_search(self.generator, self.duration,
                               self.tolerance_high, test_data)
255
256     def binary_search_with_optimized(self, traffic_generator, duration,
257                                      timeout, test_data):
258         self.queue.cancel_join_thread()
259         algorithm = MultipleLossRatioSearch(
260             measurer=traffic_generator, latency=self.enable_latency,
261             pkt_size=self.pkt_size,
262             final_trial_duration=duration,
263             final_relative_width=self.step_interval / 100,
264             number_of_intermediate_phases=self.number_of_intermediate_phases,
265             initial_trial_duration=1,
266             timeout=timeout)
267         algorithm.init_generator(self.ports, self.port_pg_id, self.profiles,
268                                  test_data, self.queue)
269         return algorithm.narrow_down_ndr_and_pdr(10000, self.max_rate,
270                                                  self.tolerance_high)
271
    def binary_search(self, traffic_generator, duration, tolerance_value,
                      test_data):
        """Binary-search the highest rate whose loss stays within tolerance.

        Drives ``bounds_iterator`` and narrows the interval in place:
        a trial whose loss ratio exceeds *tolerance_value* lowers
        ``current_upper``, otherwise ``current_lower`` is raised and the
        trial's KPIs are published as the latest successful result.

        :param traffic_generator: TG that sends traffic and exposes
            sent/loss counters and per-port stats
        :param duration: single-trial duration in seconds
        :param tolerance_value: maximum tolerated loss ratio in percent
        :param test_data: static metadata merged into every sample
        :return: dict of per-interface result samples incl. the
            theoretical max throughput seen
        """
        theor_max_thruput = 0
        result_samples = {}

        for test_value in self.bounds_iterator(LOGGING):
            # Rate is halved per direction; value is passed as a string
            # percentage relative to max_rate.
            stats = traffic_generator.send_traffic_on_tg(self.ports,
                                                         self.port_pg_id,
                                                         duration,
                                                         str(
                                                             test_value / self.max_rate / 2),
                                                         latency=self.enable_latency)
            # Re-arm the TG for the next trial: clear state and
            # re-install the cached per-port profiles.
            traffic_generator.client.reset(ports=self.ports)
            traffic_generator.client.clear_stats(ports=self.ports)
            traffic_generator.client.remove_all_streams(ports=self.ports)
            for port, profile in self.profiles.items():
                traffic_generator.client.add_streams(profile, ports=[port])

            # NOTE(review): raises ZeroDivisionError if the TG reports
            # sent == 0 (e.g. a failed trial) — confirm upstream counters.
            loss_ratio = (float(traffic_generator.loss) / float(
                traffic_generator.sent)) * 100

            samples = traffic_generator.generate_samples(stats, self.ports,
                                                         self.port_pg_id,
                                                         self.enable_latency)
            samples.update(test_data)
            LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
                         test_value, samples)
            self.queue.put(samples)

            if float(loss_ratio) > float(tolerance_value):
                LOGGING.debug("Failure... Decreasing upper bound")
                self.current_upper = test_value
            else:
                LOGGING.debug("Success! Increasing lower bound")
                self.current_lower = test_value

                # Aggregate pps and Gbps for the passing trial; +20 bytes
                # per frame accounts for preamble + inter-frame gap.
                rate_total = float(traffic_generator.sent) / float(duration)
                bandwidth_total = float(rate_total) * (
                        float(self.pkt_size) + 20) * 8 / (10 ** 9)

                success_samples = {'Result_' + key: value for key, value in
                                   samples.items()}
                success_samples["Result_{}".format('PDR')] = {
                    "rate_total_pps": float(rate_total),
                    "bandwidth_total_Gbps": float(bandwidth_total),
                    "packet_loss_ratio": float(loss_ratio),
                    "packets_lost": int(traffic_generator.loss),
                }
                self.queue.put(success_samples)

                # Store Actual throughput for result samples
                for intf in traffic_generator.vnfd_helper.interfaces:
                    name = intf["name"]
                    result_samples[name] = {
                        "Result_Actual_throughput": float(
                            success_samples["Result_{}".format(name)][
                                "rx_throughput_bps"]),
                    }

            # Track the best tx throughput seen across all trials,
            # successful or not.
            for intf in traffic_generator.vnfd_helper.interfaces:
                name = intf["name"]
                if theor_max_thruput < samples[name]["tx_throughput_bps"]:
                    theor_max_thruput = samples[name]['tx_throughput_bps']
                    self.queue.put({'theor_max_throughput': theor_max_thruput})

        result_samples["Result_theor_max_throughput"] = theor_max_thruput
        self.queue.put(result_samples)
        return result_samples