Merge "Collectd Fixes"
[yardstick.git] / yardstick / network_services / traffic_profile / rfc2544.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ RFC2544 Throughput implementation """
15
16 from __future__ import absolute_import
17 from __future__ import division
18 import logging
19
20 from trex_stl_lib.trex_stl_client import STLStream
21 from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
22 from trex_stl_lib.trex_stl_streams import STLTXCont
23
24 from yardstick.network_services.traffic_profile.traffic_profile \
25     import TrexProfile
26
27 LOGGING = logging.getLogger(__name__)
28
29
class RFC2544Profile(TrexProfile):
    """RFC2544 throughput implementation driven by a TRex STL client.

    Performs a binary search between ``min_rate`` and ``max_rate`` (percent
    of line rate): ``execute`` programs the streams and starts traffic, and
    each ``get_drop_percentage`` call evaluates the measured drop ratio
    against the configured tolerance window, narrows the search interval,
    and restarts traffic at the midpoint rate.
    """

    def __init__(self, traffic_generator):
        super(RFC2544Profile, self).__init__(traffic_generator)
        self.generator = None          # traffic-generator VNF, set lazily
        self.max_rate = None           # upper bound of the binary search (%)
        self.min_rate = None           # lower bound of the binary search (%)
        self.ports = None              # TRex port ids streams were added to
        self.rate = 100                # current trial rate, percent of line rate
        self.drop_percent_at_max_tx = None  # drop % recorded at best throughput
        self.throughput_max = None     # best rx throughput found so far (pps)
        # Robustness fix: ``first_run`` was previously only ever assigned in
        # get_drop_percentage()/execute_latency(), so reading it before either
        # ran raised AttributeError.  Initialize it here.
        self.first_run = True

    def register_generator(self, generator):
        """Attach the traffic generator used to drive and sample traffic."""
        self.generator = generator

    def execute(self, traffic_generator=None):
        """Generate the streams and start traffic on the configured ports.

        Idempotent: the streams are only programmed on the first call
        (``self.ports`` acts as the "already started" flag).

        :param traffic_generator: optional generator to adopt when none was
            registered via :meth:`register_generator`.
        """
        if traffic_generator is not None and self.generator is None:
            self.generator = traffic_generator

        if self.ports is not None:
            # Streams already programmed and traffic started; nothing to do.
            return

        self.ports = []
        priv_ports = self.generator.priv_ports
        pub_ports = self.generator.pub_ports
        # start from 1 for private_1, public_1, etc.
        for index, (priv_port, pub_port) in enumerate(zip(priv_ports, pub_ports), 1):
            profile_data = self.params.get('private_{}'.format(index), '')
            self.ports.append(priv_port)
            # pass profile_data directly, don't use self.profile_data
            self.generator.client.add_streams(self.get_streams(profile_data), ports=priv_port)
            profile_data = self.params.get('public_{}'.format(index), '')
            # correlated traffic doesn't use public traffic?
            if not profile_data or self.generator.rfc2544_helper.correlated_traffic:
                continue
            # just get the pub_port
            self.ports.append(pub_port)
            self.generator.client.add_streams(self.get_streams(profile_data), ports=pub_port)

        # Seed the binary search: [0, initial rate].
        self.max_rate = self.rate
        self.min_rate = 0
        self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
                                    duration=30, force=True)
        self.drop_percent_at_max_tx = 0
        self.throughput_max = 0

    def get_multiplier(self):
        """Return the TRex rate multiplier for the next iteration.

        Updates ``self.rate`` to the midpoint of the current search interval
        and scales it by ``self.pps`` (inherited from the base profile).
        """
        self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
        multiplier = round(self.rate / self.pps, 2)
        return str(multiplier)

    def get_drop_percentage(self, generator=None):
        """Calculate the drop percentage, narrow the search, restart traffic.

        :param generator: optional override of the registered generator.
        :return: the samples dict augmented with Tx/Rx throughput, the
            current drop percentage, and the best throughput found so far.
        """
        if generator is None:
            generator = self.generator
        run_duration = self.generator.RUN_DURATION
        samples = self.generator.generate_samples()

        in_packets = sum([value['in_packets'] for value in samples.values()])
        out_packets = sum([value['out_packets'] for value in samples.values()])

        packet_drop = abs(out_packets - in_packets)
        drop_percent = 100.0
        try:
            drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
        except ZeroDivisionError:
            LOGGING.info('No traffic is flowing')

        # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
        tolerance_low = generator.rfc2544_helper.tolerance_low
        tolerance_high = generator.rfc2544_helper.tolerance_high

        tx_rate = out_packets / run_duration
        rx_rate = in_packets / run_duration

        throughput_max = self.throughput_max
        drop_percent_at_max_tx = self.drop_percent_at_max_tx

        # NOTE(review): execute() sets drop_percent_at_max_tx to 0, so this
        # branch only fires when this method runs before execute() — confirm
        # that ordering is intended.
        if self.drop_percent_at_max_tx is None:
            self.rate = tx_rate
            self.first_run = False

        if drop_percent > tolerance_high:
            # Too much loss: lower the upper bound.
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.max_rate = self.rate
            if throughput_max == 0:
                throughput_max = rx_rate
                drop_percent_at_max_tx = drop_percent

        elif drop_percent >= tolerance_low:
            # Within the tolerance window: report this result but keep the
            # persistent tracking values untouched.
            # TODO(esm): why do we update the samples dict in this case
            #            and not update our tracking values?
            throughput_max = rx_rate
            drop_percent_at_max_tx = drop_percent

        elif drop_percent >= self.drop_percent_at_max_tx:
            # Below tolerance but not better than before: raise the lower
            # bound and record this as the new best throughput.
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.min_rate = self.rate
            self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
            self.throughput_max = throughput_max = rx_rate

        else:
            # Below tolerance and below the recorded drop: just raise the
            # lower bound.
            # TODO(esm): why don't we discard results that are out of tolerance?
            self.min_rate = self.rate

        generator.clear_client_stats()
        generator.start_client(mult=self.get_multiplier(),
                               duration=run_duration, force=True)

        # if correlated traffic update the Throughput
        if generator.rfc2544_helper.correlated_traffic:
            throughput_max *= 2

        samples.update({
            'TxThroughput': tx_rate,
            'RxThroughput': rx_rate,
            'CurrentDropPercentage': drop_percent,
            'Throughput': throughput_max,
            'DropPercentage': drop_percent_at_max_tx,
        })

        return samples

    def execute_latency(self, generator=None, samples=None):
        """Program latency-stat streams and run a fixed-rate latency pass.

        :param generator: optional override of the registered generator.
        :param samples: pre-collected samples; generated when not supplied.
        """
        if generator is None:
            generator = self.generator

        if samples is None:
            samples = generator.generate_samples()

        self.pps, multiplier = self.calculate_pps(samples)
        self.ports = []
        self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
        priv_ports = generator.priv_ports
        pub_ports = generator.pub_ports
        for index, (priv_port, pub_port) in enumerate(zip(priv_ports, pub_ports), 1):
            profile_data = self.params.get('private_{}'.format(index), '')
            self.ports.append(priv_port)
            generator.client.add_streams(self.get_streams(profile_data),
                                         ports=priv_port)

            profile_data = self.params.get('public_{}'.format(index), '')
            # NOTE(review): execute() checks rfc2544_helper.correlated_traffic
            # here, while this method checks generator.correlated_traffic —
            # confirm which attribute is the intended flag.
            if not profile_data or generator.correlated_traffic:
                continue

            # BUG FIX: this previously re-read generator.pub_ports[index]
            # with the 1-based loop index — an off-by-one that skipped
            # pub_ports[0] and raised IndexError on the last pair.  Use the
            # pub_port already unpacked from the zip, matching execute().
            self.ports.append(pub_port)
            generator.client.add_streams(self.get_streams(profile_data),
                                         ports=pub_port)

        generator.start_client(ports=self.ports, mult=str(multiplier),
                               duration=120, force=True)
        self.first_run = False

    def calculate_pps(self, samples):
        """Return (pps, multiplier) for the latency run.

        pps is half the measured throughput (per-direction rate); the
        multiplier rescales the current rate against the profile pps.
        """
        pps = round(samples['Throughput'] / 2, 2)
        multiplier = round(self.rate / self.pps, 2)
        return pps, multiplier

    def create_single_stream(self, packet_size, pps, isg=0):
        """Build one continuous STL stream for ``packet_size``.

        :param packet_size: payload size used to build the packet.
        :param pps: packets per second; falls back to ``self.pps`` when falsy.
        :param isg: inter-stream gap passed through to STLStream.
        :return: the constructed STLStream, with latency flow stats attached
            (and ``self.pg_id`` incremented) when a pg_id is configured.
        """
        packet = self._create_single_packet(packet_size)
        if pps:
            stl_mode = STLTXCont(pps=pps)
        else:
            stl_mode = STLTXCont(pps=self.pps)
        if self.pg_id:
            LOGGING.debug("pg_id: %s", self.pg_id)
            stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
            stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
                               flow_stats=stl_flow_stats)
            # Each latency stream needs a unique packet-group id.
            self.pg_id += 1
        else:
            stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
        return stream