VPP RFC2544 traffic profile (binary-search throughput test helper)
[yardstick.git] / yardstick / network_services / traffic_profile / vpp_rfc2544.py
1 # Copyright (c) 2019 Viosoft Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import datetime
16 import logging
17 from random import choice
18 from string import ascii_letters
19
20 from ipaddress import ip_address
21 from trex_stl_lib import api as Pkt
22 from trex_stl_lib import trex_stl_client
23 from trex_stl_lib import trex_stl_packet_builder_scapy
24 from trex_stl_lib import trex_stl_streams
25
26 from yardstick.common import constants
27 from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \
28     PortPgIDMap
29 from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \
30     DST
31
32 LOGGING = logging.getLogger(__name__)
33
34
class VppRFC2544Profile(RFC2544Profile):
    """RFC2544 binary-search traffic profile for VPP scenarios.

    Extends the generic T-Rex :class:`RFC2544Profile` with a
    configurable search window (``lower_bound``/``upper_bound``),
    optional latency measurement streams, and support for rates
    expressed in packets per second (used when ``max_rate`` > 100).
    """

    def __init__(self, traffic_generator):
        super(VppRFC2544Profile, self).__init__(traffic_generator)

        # Search parameters taken from the traffic-profile configuration.
        self.duration = self.config.duration
        self.precision = self.config.test_precision
        self.lower_bound = self.config.lower_bound
        self.upper_bound = self.config.upper_bound
        self.step_interval = self.config.step_interval
        self.enable_latency = self.config.enable_latency

        # Filled in while profiles are created / traffic is running.
        self.pkt_size = None
        self.flow = None

        self.tolerance_low = 0
        self.tolerance_high = 0

        self.queue = None
        self.port_pg_id = None

        # Current binary-search interval.
        self.current_lower = self.lower_bound
        self.current_upper = self.upper_bound

        self.ports = []
        self.profiles = {}

    @property
    def delta(self):
        """Width of the current binary-search interval."""
        return self.current_upper - self.current_lower

    @property
    def mid_point(self):
        """Mid point of the current binary-search interval."""
        return (self.current_lower + self.current_upper) / 2

    @staticmethod
    def calculate_frame_size(imix):
        """Return ``(average frame size, total weight)`` for an IMIX spec.

        :param imix: mapping of frame-size label (e.g. ``'64B'``) to an
            integer weight.  Falls back to ``(64, 100)`` when *imix* is
            empty or its weights do not add up to a positive value.
        """
        if not imix:
            return 64, 100

        imix_count = {size.upper().replace('B', ''): int(weight)
                      for size, weight in imix.items()}
        imix_sum = sum(imix_count.values())
        if imix_sum <= 0:
            return 64, 100
        packets_total = sum(int(size) * weight
                            for size, weight in imix_count.items())
        return packets_total / imix_sum, imix_sum

    @staticmethod
    def _gen_payload(length):
        """Return a random ASCII-letter payload of *length* characters."""
        # ''.join avoids the quadratic cost of the previous
        # character-by-character string concatenation.
        return ''.join(choice(ascii_letters) for _ in range(length))

    def bounds_iterator(self, logger=None):
        """Yield successive test rates of the binary search.

        The first value tested is the upper bound; every following value
        is the middle of the current interval.  Iteration stops when the
        interval becomes narrower than the configured precision.
        """
        self.current_lower = self.lower_bound
        self.current_upper = self.upper_bound

        test_value = self.current_upper
        while abs(self.delta) >= self.precision:
            if logger:
                # The old message mislabelled step_interval as
                # "precision"; log it under its real name.
                logger.debug("New interval [%s, %s), step interval: %s",
                             self.current_lower,
                             self.current_upper, self.step_interval)
                logger.info("Testing with value %s", test_value)

            yield test_value
            test_value = self.mid_point

    def register_generator(self, generator):
        """Register *generator* and pull its RFC2544 parameters."""
        super(VppRFC2544Profile, self).register_generator(generator)
        self.init_traffic_params(generator)

    def init_queue(self, queue):
        """Attach the multiprocessing queue used to publish samples."""
        self.queue = queue
        self.queue.cancel_join_thread()

    def init_traffic_params(self, generator):
        """Copy latency/tolerance/max-rate settings from *generator*."""
        if generator.rfc2544_helper.latency:
            self.enable_latency = True
        self.tolerance_low = generator.rfc2544_helper.tolerance_low
        self.tolerance_high = generator.rfc2544_helper.tolerance_high
        vpp_config = generator.scenario_helper.all_options.get(
            'vpp_config', {})
        self.max_rate = vpp_config.get('max_rate', self.rate)

    def create_profile(self, profile_data, current_port):
        """Build an STLProfile with one stream set per packet definition."""
        streams = []
        for packet_name in profile_data:
            imix = (profile_data[packet_name].
                    get('outer_l2', {}).get('framesize'))
            self.pkt_size, imix_sum = self.calculate_frame_size(imix)
            self._create_vm(profile_data[packet_name])
            if self.max_rate > 100:
                # max_rate above 100 means an absolute pps rate rather
                # than a line-rate percentage.
                imix_data = self._create_imix_data(
                    imix, constants.DISTRIBUTION_IN_PACKETS)
            else:
                imix_data = self._create_imix_data(imix)
            _streams = self._create_single_stream(current_port, imix_data,
                                                  imix_sum)
            streams.extend(_streams)
        return trex_stl_streams.STLProfile(streams)

    def _set_outer_l3v4_fields(self, outer_l3v4):
        """Set up outer l3v4 fields from the traffic profile."""
        ip_params = {}
        if 'proto' in outer_l3v4:
            ip_params['proto'] = outer_l3v4['proto']
        self._set_proto_fields(IP, **ip_params)

        self.flow = int(outer_l3v4['count'])
        src_start_ip, _ = outer_l3v4['srcip4'].split('-')
        dst_start_ip, _ = outer_l3v4['dstip4'].split('-')

        self.ip_packet = Pkt.IP(src=src_start_ip,
                                dst=dst_start_ip,
                                proto=outer_l3v4['proto'])
        if self.flow > 1:
            # Derive the end of the destination range from the flow
            # count so exactly `flow` destination addresses are used.
            dst_start_int = int(ip_address(str(dst_start_ip)))
            dst_end_ip_new = ip_address(dst_start_int + self.flow - 1)
            self._set_proto_addr(IP, DST,
                                 "{start_ip}-{end_ip}".format(
                                     start_ip=dst_start_ip,
                                     end_ip=str(dst_end_ip_new)),
                                 self.flow)

    def _create_single_packet(self, size=64):
        """Return ``(data packet, latency packet)`` builders of *size* bytes.

        The latency packet carries no field-engine VM, so its contents
        stay constant between transmissions.
        """
        ether_packet = self.ether_packet
        ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
        base_pkt = ether_packet / ip_packet
        # The "- 4" presumably reserves room for the Ethernet FCS
        # appended on the wire — TODO confirm.
        payload_len = max(0, size - len(base_pkt) - 4)
        # Generate the payload once and share it between both builders.
        payload = self._gen_payload(payload_len)
        packet = trex_stl_packet_builder_scapy.STLPktBuilder(
            pkt=base_pkt / payload, vm=self.trex_vm)
        packet_lat = trex_stl_packet_builder_scapy.STLPktBuilder(
            pkt=base_pkt / payload)

        return packet, packet_lat

    def _create_single_stream(self, current_port, imix_data, imix_sum,
                              isg=0.0):
        """Create data (and optional latency) streams per IMIX frame size."""
        streams = []
        for size, weight in ((int(size), float(weight)) for (size, weight)
                             in imix_data.items() if float(weight) > 0):
            if current_port == 1:
                isg += 10.0
            if self.max_rate > 100:
                # Absolute rate: weight is this size's share of the
                # total pps budget (imix_sum).
                mode = trex_stl_streams.STLTXCont(
                    pps=int(weight * imix_sum / 100))
                mode_lat = mode
            else:
                # Relative rate: weight is a percentage of max_rate.
                mode = trex_stl_streams.STLTXCont(
                    percentage=weight * self.max_rate / 100)
                mode_lat = trex_stl_streams.STLTXCont(pps=9000)

            packet, packet_lat = self._create_single_packet(size)
            streams.append(
                trex_stl_client.STLStream(isg=isg, packet=packet, mode=mode))
            if self.enable_latency:
                pg_id = self.port_pg_id.increase_pg_id(current_port)
                stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
                streams.append(
                    trex_stl_client.STLStream(isg=isg,
                                              packet=packet_lat,
                                              mode=mode_lat,
                                              flow_stats=stl_flow))
        return streams

    def execute_traffic(self, traffic_generator=None):
        """Install stream profiles on every port and run the search."""
        if traffic_generator is not None and self.generator is None:
            self.generator = traffic_generator

        self.ports = []
        self.profiles = {}
        self.port_pg_id = PortPgIDMap()
        for vld_id, intfs in sorted(self.generator.networks.items()):
            profile_data = self.params.get(vld_id)
            if not profile_data:
                continue
            # With correlated traffic the downlink direction is not
            # driven by the generator, so no streams are installed.
            if (vld_id.startswith(self.DOWNLINK) and
                    self.generator.rfc2544_helper.correlated_traffic):
                continue
            for intf in intfs:
                current_port = int(self.generator.port_num(intf))
                self.port_pg_id.add_port(current_port)
                profile = self.create_profile(profile_data, current_port)
                self.generator.client.add_streams(profile,
                                                  ports=[current_port])

                self.ports.append(current_port)
                self.profiles[current_port] = profile

        timeout = self.generator.scenario_helper.scenario_cfg["runner"][
            "duration"]
        test_data = {
            "test_duration": timeout,
            "test_precision": self.precision,
            "tolerated_loss": self.tolerance_high,
            "duration": self.duration,
            "packet_size": self.pkt_size,
            "flow": self.flow
        }

        if self.max_rate > 100:
            self.binary_search_with_optimized(self.generator, self.duration,
                                              timeout, test_data)
        else:
            self.binary_search(self.generator, self.duration,
                               self.tolerance_high, test_data)

    def binary_search_with_optimized(self, traffic_generator, duration,
                                     timeout, test_data):
        """Placeholder for the optimized throughput search."""
        # TODO Support FD.io Multiple Loss Ratio search (MLRsearch)
        pass

    def binary_search(self, traffic_generator, duration, tolerance_value,
                      test_data):
        """Binary-search the highest rate within the loss tolerance.

        Publishes per-iteration samples (and, on success, result
        samples) on the queue and returns the collected result samples.
        """
        theor_max_thruput = 0
        result_samples = {}

        for test_value in self.bounds_iterator(LOGGING):
            stats = traffic_generator.send_traffic_on_tg(
                self.ports, self.port_pg_id, duration,
                str(test_value / self.max_rate / 2),
                latency=self.enable_latency)
            # Reset the client and re-install the saved profiles so the
            # next iteration starts from a clean state.
            traffic_generator.client.reset(ports=self.ports)
            traffic_generator.client.clear_stats(ports=self.ports)
            traffic_generator.client.remove_all_streams(ports=self.ports)
            for port, profile in self.profiles.items():
                traffic_generator.client.add_streams(profile, ports=[port])

            loss_ratio = (float(traffic_generator.loss) / float(
                traffic_generator.sent)) * 100

            samples = traffic_generator.generate_samples(stats, self.ports,
                                                         self.port_pg_id,
                                                         self.enable_latency)
            samples.update(test_data)
            LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
                         test_value, samples)
            self.queue.put(samples)

            if float(loss_ratio) > float(tolerance_value):
                LOGGING.debug("Failure... Decreasing upper bound")
                self.current_upper = test_value
            else:
                LOGGING.debug("Success! Increasing lower bound")
                self.current_lower = test_value

                rate_total = float(traffic_generator.sent) / float(duration)
                # "+ 20" presumably accounts for per-frame wire overhead
                # (preamble + inter-frame gap) — TODO confirm; result in Gbps.
                bandwidth_total = float(rate_total) * (
                        float(self.pkt_size) + 20) * 8 / (10 ** 9)

                success_samples = {'Result_' + key: value for key, value in
                                   samples.items()}
                success_samples["Result_{}".format('PDR')] = {
                    "rate_total_pps": float(rate_total),
                    "bandwidth_total_Gbps": float(bandwidth_total),
                    "packet_loss_ratio": float(loss_ratio),
                    "packets_lost": int(traffic_generator.loss),
                }
                self.queue.put(success_samples)

                # Store actual throughput for result samples.
                for intf in traffic_generator.vnfd_helper.interfaces:
                    name = intf["name"]
                    result_samples[name] = {
                        "Result_Actual_throughput": float(
                            success_samples["Result_{}".format(name)][
                                "rx_throughput_bps"]),
                    }

            # Track the highest observed TX throughput across all runs.
            for intf in traffic_generator.vnfd_helper.interfaces:
                name = intf["name"]
                if theor_max_thruput < samples[name]["tx_throughput_bps"]:
                    theor_max_thruput = samples[name]['tx_throughput_bps']
                    self.queue.put({'theor_max_throughput': theor_max_thruput})

        result_samples["Result_theor_max_throughput"] = theor_max_thruput
        self.queue.put(result_samples)
        return result_samples