Merge "Add "volumeMounts" parameter in Kubernetes context"
yardstick/network_services/traffic_profile/prox_binsearch.py
index 5700f98..225ee43 100644
@@ -20,6 +20,7 @@ import datetime
 import time
 
 from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+from yardstick.network_services import constants
 
 LOG = logging.getLogger(__name__)
 
@@ -89,21 +90,24 @@ class ProxBinSearchProfile(ProxProfile):
 
         # Store one time only value in influxdb
         single_samples = {
-            "test_duration" : traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
-            "test_precision" : self.params["traffic_profile"]["test_precision"],
-            "tolerated_loss" : self.params["traffic_profile"]["tolerated_loss"],
-            "duration" : duration
+            "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
+            "test_precision": self.params["traffic_profile"]["test_precision"],
+            "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
+            "duration": duration
         }
         self.queue.put(single_samples)
         self.prev_time = time.time()
 
         # throughput and packet loss from the most recent successful test
         successful_pkt_loss = 0.0
+        line_speed = traffic_gen.scenario_helper.all_options.get(
+            "interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
         for test_value in self.bounds_iterator(LOG):
             result, port_samples = self._profile_helper.run_test(pkt_size, duration,
-                                                                 test_value, self.tolerated_loss)
+                                                                 test_value,
+                                                                 self.tolerated_loss,
+                                                                 line_speed)
             self.curr_time = time.time()
-            diff_time = self.curr_time - self.prev_time
             self.prev_time = self.curr_time
 
             if result.success:
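
A side note on the new line_speed value above: it is simply the configured interface speed converted to bits per second, with a NIC default as fallback. A minimal sketch of the arithmetic, assuming NIC_GBPS_DEFAULT is 10 and ONE_GIGABIT_IN_BITS is 10 ** 9 (plausible values for yardstick.network_services.constants, not confirmed by this patch):

    # Sketch only: the concrete constant values below are assumptions.
    NIC_GBPS_DEFAULT = 10              # assumed fallback NIC speed in Gbps
    ONE_GIGABIT_IN_BITS = 10 ** 9      # assumed size of one gigabit in bits

    all_options = {"interface_speed_gbps": 25}     # hypothetical scenario option
    line_speed = all_options.get("interface_speed_gbps",
                                 NIC_GBPS_DEFAULT) * ONE_GIGABIT_IN_BITS
    # line_speed == 25000000000 bits/s, passed to run_test() in the hunk above
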
@@ -111,15 +115,15 @@ class ProxBinSearchProfile(ProxProfile):
                 self.current_lower = test_value
                 successful_pkt_loss = result.pkt_loss
                 samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
-                samples["TxThroughput"] = samples["TxThroughput"] * 1000 * 1000
 
                 # store results with success tag in influxdb
                 success_samples = {'Success_' + key: value for key, value in samples.items()}
 
-                success_samples["Success_rx_total"] = int(result.rx_total / diff_time)
-                success_samples["Success_tx_total"] = int(result.tx_total / diff_time)
-                success_samples["Success_can_be_lost"] = int(result.can_be_lost / diff_time)
-                success_samples["Success_drop_total"] = int(result.drop_total / diff_time)
+                # Store packet-count based statistics (throughput is already reported above)
+                success_samples["Success_rx_total"] = int(result.rx_total)
+                success_samples["Success_tx_total"] = int(result.tx_total)
+                success_samples["Success_can_be_lost"] = int(result.can_be_lost)
+                success_samples["Success_drop_total"] = int(result.drop_total)
                 self.queue.put(success_samples)
 
                 # Store Actual throughput for result samples
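
The success branch now queues raw packet counters instead of per-iteration rates; only the 'Success_' prefixing and the int() casts shape the values. A small illustrative sketch with invented numbers (the sample field names are hypothetical, real ones come from result.get_samples()):

    # Invented sample values for illustration only.
    samples = {"TxThroughput": 14.88, "RxThroughput": 14.85, "PktSize": 64}
    success_samples = {'Success_' + key: value for key, value in samples.items()}

    # Counters are stored as plain packet counts, no longer divided by diff_time.
    rx_total, tx_total = 29700000, 29760000        # hypothetical counter readings
    success_samples["Success_rx_total"] = int(rx_total)
    success_samples["Success_tx_total"] = int(tx_total)
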
@@ -129,20 +133,16 @@ class ProxBinSearchProfile(ProxProfile):
                 LOG.debug("Failure... Decreasing upper bound")
                 self.current_upper = test_value
                 samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+                # samples contains data such as latency, throughput and packet counts,
+                # so these values should not be divided by the time difference
 
-            for k in samples:
-                    tmp = samples[k]
-                    if isinstance(tmp, dict):
-                        for k2 in tmp:
-                            samples[k][k2] = int(samples[k][k2] / diff_time)
-
-            if theor_max_thruput < samples["TxThroughput"]:
-                theor_max_thruput = samples['TxThroughput']
+            if theor_max_thruput < samples["RequestedTxThroughput"]:
+                theor_max_thruput = samples['RequestedTxThroughput']
                 self.queue.put({'theor_max_throughput': theor_max_thruput})
 
             LOG.debug("Collect TG KPIs %s %s", datetime.datetime.now(), samples)
             self.queue.put(samples)
 
         result_samples["Result_pktSize"] = pkt_size
-        result_samples["Result_theor_max_throughput"] = theor_max_thruput/ (1000 * 1000)
+        result_samples["Result_theor_max_throughput"] = theor_max_thruput
         self.queue.put(result_samples)
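
Result_theor_max_throughput is now the running maximum of the RequestedTxThroughput samples, reported as-is rather than rescaled by 1000 * 1000. A toy illustration of that bookkeeping (the throughput figures are invented):

    theor_max_thruput = 0.0
    for requested_tx in (4.0, 7.5, 6.2, 9.1):      # invented RequestedTxThroughput per iteration
        if theor_max_thruput < requested_tx:
            theor_max_thruput = requested_tx
    # theor_max_thruput ends up as 9.1 and is queued as Result_theor_max_throughput
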