Merge "support for Bottlenecks soak throughputs"
yardstick/network_services/traffic_profile/rfc2544.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RFC2544 throughput implementation """

from __future__ import absolute_import
from __future__ import division
import logging

from trex_stl_lib.trex_stl_client import STLStream
from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
from trex_stl_lib.trex_stl_streams import STLTXCont

from yardstick.network_services.traffic_profile.trex_traffic_profile \
    import TrexProfile

LOGGING = logging.getLogger(__name__)


class RFC2544Profile(TrexProfile):
    """ This class handles the RFC2544 implementation. """

    def __init__(self, traffic_generator):
        super(RFC2544Profile, self).__init__(traffic_generator)
        self.generator = None
        self.max_rate = None
        self.min_rate = None
        self.ports = None
        self.rate = 100
        self.drop_percent_at_max_tx = None
        self.throughput_max = None

    def register_generator(self, generator):
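        """ Register the traffic generator that will drive this profile """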
        self.generator = generator

    def execute_traffic(self, traffic_generator=None):
        """ Generate the streams and run traffic on the given ports """
        if traffic_generator is not None and self.generator is None:
            self.generator = traffic_generator

        if self.ports is not None:
            return

        self.ports = []
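        # build streams for every port whose network has a profile defined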
        for vld_id, intfs in sorted(self.generator.networks.items()):
            profile_data = self.params.get(vld_id)
            # no profile for this port
            if not profile_data:
                continue
            # correlated traffic doesn't use public traffic?
            if (vld_id.startswith(self.DOWNLINK) and
                    self.generator.rfc2544_helper.correlated_traffic):
                continue
            for intf in intfs:
                port = self.generator.port_num(intf)
                self.ports.append(port)
                self.generator.client.add_streams(
                    self.get_streams(profile_data), ports=port)

        self.max_rate = self.rate
        self.min_rate = 0
        self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
                                    duration=30, force=True)
        self.drop_percent_at_max_tx = 0
        self.throughput_max = 0

    def get_multiplier(self):
        """ Get the rate at which the next iteration should run """
        self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
        multiplier = round(self.rate / self.pps, 2)
        return str(multiplier)

    def get_drop_percentage(self, generator=None):
        """ Calculate the drop percentage and run the traffic """
        if generator is None:
            generator = self.generator
        run_duration = self.generator.RUN_DURATION
        samples = self.generator.generate_samples(self.ports)

        in_packets = sum([value['in_packets'] for value in samples.values()])
        out_packets = sum([value['out_packets'] for value in samples.values()])

        packet_drop = abs(out_packets - in_packets)
        drop_percent = 100.0
        try:
            drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
        except ZeroDivisionError:
            LOGGING.info('No traffic is flowing')

        # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
        tolerance_low = generator.rfc2544_helper.tolerance_low
        tolerance_high = generator.rfc2544_helper.tolerance_high

        tx_rate = out_packets / run_duration
        rx_rate = in_packets / run_duration

        throughput_max = self.throughput_max
        drop_percent_at_max_tx = self.drop_percent_at_max_tx

        if self.drop_percent_at_max_tx is None:
            self.rate = tx_rate
            self.first_run = False

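        # Binary search on the transmit rate: narrow the [min_rate, max_rate]
        # window according to where the measured drop percentage falls
        # relative to the configured tolerance band.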
        if drop_percent > tolerance_high:
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.max_rate = self.rate
            if throughput_max == 0:
                throughput_max = rx_rate
                drop_percent_at_max_tx = drop_percent

        elif drop_percent >= tolerance_low:
            # TODO(esm): why do we update the samples dict in this case
            #            and not update our tracking values?
            throughput_max = rx_rate
            drop_percent_at_max_tx = drop_percent

        elif drop_percent >= self.drop_percent_at_max_tx:
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.min_rate = self.rate
            self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
            self.throughput_max = throughput_max = rx_rate

        else:
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.min_rate = self.rate

        generator.clear_client_stats(self.ports)
        generator.start_client(self.ports, mult=self.get_multiplier(),
                               duration=run_duration, force=True)

        # if correlated traffic update the Throughput
        if generator.rfc2544_helper.correlated_traffic:
            throughput_max *= 2

        samples.update({
            'TxThroughput': tx_rate,
            'RxThroughput': rx_rate,
            'CurrentDropPercentage': drop_percent,
            'Throughput': throughput_max,
            'DropPercentage': drop_percent_at_max_tx,
        })

        return samples

    def execute_latency(self, generator=None, samples=None):
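        """ Re-run the traffic at the measured throughput and collect latency stats """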
        if generator is not None and self.generator is None:
            self.generator = generator

        if samples is None:
            samples = self.generator.generate_samples()

        self.pps, multiplier = self.calculate_pps(samples)
        self.ports = []
        self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
        for vld_id, intfs in sorted(self.generator.networks.items()):
            profile_data = self.params.get(vld_id)
            if not profile_data:
                continue
            # correlated traffic doesn't use public traffic?
            if (vld_id.startswith(self.DOWNLINK) and
                    self.generator.rfc2544_helper.correlated_traffic):
                continue
            for intf in intfs:
                port = self.generator.port_num(intf)
                self.ports.append(port)
                self.generator.client.add_streams(
                    self.get_streams(profile_data), ports=port)

        self.generator.start_client(ports=self.ports, mult=str(multiplier),
                                    duration=120, force=True)
        self.first_run = False

    def calculate_pps(self, samples):
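        """ Compute the pps target and the TRex rate multiplier from the measured throughput """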
        pps = round(samples['Throughput'] / 2, 2)
        multiplier = round(self.rate / self.pps, 2)
        return pps, multiplier

    def create_single_stream(self, packet_size, pps, isg=0):
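        """ Create one continuous STLStream, with latency stats when a pg_id is set """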
        packet = self._create_single_packet(packet_size)
        if pps:
            stl_mode = STLTXCont(pps=pps)
        else:
            stl_mode = STLTXCont(pps=self.pps)
        if self.pg_id:
            LOGGING.debug("pg_id: %s", self.pg_id)
            stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
            stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
                               flow_stats=stl_flow_stats)
            self.pg_id += 1
        else:
            stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
        return stream
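
# Typical call sequence (a sketch only, not part of the module): `tp_config`
# and `trex_generator` are hypothetical stand-ins for the traffic-profile
# configuration and the TRex-backed generator this class expects.
#
#     profile = RFC2544Profile(tp_config)
#     profile.register_generator(trex_generator)
#     profile.execute_traffic()                    # start the first 30 s run
#     samples = profile.get_drop_percentage()      # one binary-search step per call
#     profile.execute_latency(samples=samples)     # 120 s run for latency stats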