Merge "Add "volumeMounts" parameter in Kubernetes context"
yardstick/network_services/traffic_profile/prox_binsearch.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """ Fixed traffic profile definitions """
15
16 from __future__ import absolute_import
17
18 import logging
19 import datetime
20 import time
21
22 from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
23 from yardstick.network_services import constants
24
25 LOG = logging.getLogger(__name__)
26
27
28 class ProxBinSearchProfile(ProxProfile):
29     """
30     This profile adds a single stream at the beginning of the traffic session
31     """
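
    # Illustrative only: the profile is driven by the "traffic_profile"
    # section of a traffic-profile YAML. The keys below mirror the parameters
    # referenced in this module (tolerated_loss, test_precision) plus the
    # bounds and packet sizes consumed by the ProxProfile base class; treat
    # the exact schema and values as an assumption, not a spec.
    #
    #   traffic_profile:
    #     traffic_type: ProxBinSearchProfile
    #     tolerated_loss: 0.001
    #     test_precision: 0.1
    #     packet_sizes: [64]
    #     duration: 30
    #     lower_bound: 0.0
    #     upper_bound: 100.0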

    def __init__(self, tp_config):
        super(ProxBinSearchProfile, self).__init__(tp_config)
        self.current_lower = self.lower_bound
        self.current_upper = self.upper_bound

    @property
    def delta(self):
        return self.current_upper - self.current_lower

    @property
    def mid_point(self):
        return (self.current_lower + self.current_upper) / 2

    def bounds_iterator(self, logger=None):
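        """Generate the transmit rates to test, narrowing the search interval.

        The first value yielded is the upper bound; each subsequent value is
        the midpoint of the interval that remains after the caller has moved
        one of the bounds. Iteration stops once the interval is narrower than
        the configured precision.

        For example (illustrative numbers): with lower_bound=0.0,
        upper_bound=100.0 and precision=25.0, the iterator yields 100.0 first;
        if that test fails it then yields 50.0, followed by 75.0 or 25.0
        depending on the outcome, and so on.
        """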
        self.current_lower = self.lower_bound
        self.current_upper = self.upper_bound

        test_value = self.current_upper
        while abs(self.delta) >= self.precision:
            if logger:
                logger.debug("New interval [%s, %s), precision: %s", self.current_lower,
                             self.current_upper, self.precision)
                logger.info("Testing with value %s", test_value)

            yield test_value
            test_value = self.mid_point

    def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration):
        """Run the test for a single packet size.

        :param traffic_gen: traffic generator instance
        :type traffic_gen: TrafficGen
        :param pkt_size: The packet size to test with.
        :type pkt_size: int
        :param duration: The duration for each try.
        :type duration: int
        """
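        # Illustrative only: this is typically invoked by the ProxProfile base
        # class once per configured packet size, along the lines of
        #   self.run_test_with_pkt_size(traffic_gen, 64, 30)
        # i.e. a binary search at 64-byte packets with 30-second tries.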

        LOG.info("Testing with packet size %d", pkt_size)

        # Binary search assumes the lower value of the interval is
        # successful and the upper value is a failure.
        # The first value tested is the maximum value. If that
        # succeeds, no more searching is needed. If it fails, a regular
        # binary search is performed.
        #
        # The test_value used for the first iteration of binary search
        # is adjusted so that the delta between this test_value and the
        # upper bound is a power-of-2 multiple of precision. In the
        # optimistic situation where this first test_value results in a
        # success, the binary search will complete on an integer multiple
        # of the precision, rather than on a fraction of it.
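        #
        # Worked example with illustrative numbers: with bounds [0, 100] and
        # precision 5, the first try is at 100. If that fails, the next tries
        # are 50, then 75 (if 50 passed) or 25 (if it failed), and so on,
        # until the gap between the last passing and last failing rate is
        # smaller than 5.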

        theor_max_thruput = 0

        result_samples = {}

        # Store the one-time-only values in influxdb
        single_samples = {
            "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
            "test_precision": self.params["traffic_profile"]["test_precision"],
            "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
            "duration": duration
        }
        self.queue.put(single_samples)
        self.prev_time = time.time()

        # throughput and packet loss from the most recent successful test
        successful_pkt_loss = 0.0
        line_speed = traffic_gen.scenario_helper.all_options.get(
            "interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
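        # Illustrative only: assuming the defaults of a 10 Gbps NIC and
        # ONE_GIGABIT_IN_BITS == 10**9, line_speed evaluates to 10**10 bits
        # per second; an "interface_speed_gbps" entry in the test options
        # overrides the NIC default.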
        for test_value in self.bounds_iterator(LOG):
            result, port_samples = self._profile_helper.run_test(
                pkt_size, duration, test_value, self.tolerated_loss,
                line_speed)
            self.curr_time = time.time()
            self.prev_time = self.curr_time

            if result.success:
                LOG.debug("Success! Increasing lower bound")
                self.current_lower = test_value
                successful_pkt_loss = result.pkt_loss
                samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)

                # store results with a "Success_" prefix in influxdb
                success_samples = {'Success_' + key: value for key, value in samples.items()}

                # Store packet-count based statistics (we already have throughput)
                success_samples["Success_rx_total"] = int(result.rx_total)
                success_samples["Success_tx_total"] = int(result.tx_total)
                success_samples["Success_can_be_lost"] = int(result.can_be_lost)
                success_samples["Success_drop_total"] = int(result.drop_total)
                self.queue.put(success_samples)

                # Store the actual throughput for the result samples
                result_samples["Result_Actual_throughput"] = \
                    success_samples["Success_RxThroughput"]
            else:
                LOG.debug("Failure... Decreasing upper bound")
                self.current_upper = test_value
                samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
                # samples contains data such as latency, throughput and packet
                # counts, so it should not be divided by the time difference

            if theor_max_thruput < samples["RequestedTxThroughput"]:
                theor_max_thruput = samples["RequestedTxThroughput"]
                self.queue.put({'theor_max_throughput': theor_max_thruput})

            LOG.debug("Collect TG KPIs %s %s", datetime.datetime.now(), samples)
            self.queue.put(samples)

        result_samples["Result_pktSize"] = pkt_size
        result_samples["Result_theor_max_throughput"] = theor_max_thruput
        self.queue.put(result_samples)