behave_tests: code cleaning (TEST_DB_EXT_URL)
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index f4dda58..41b3cf8 100644
@@ -25,6 +25,7 @@ import json
 import requests
 import subprocess
 from subprocess import DEVNULL
+from typing import Optional
 
 from nfvbench.summarizer import Formatter
 from nfvbench.traffic_gen.traffic_utils import parse_rate_str
@@ -42,11 +43,6 @@ def override_xtesting_project_name(context, project_name):
     context.data['PROJECT_NAME'] = project_name
 
 
-@given('TEST_DB_EXT_URL: {test_db_ext_url}')
-def override_xtesting_test_db_ext_url(context, test_db_ext_url):
-    context.data['TEST_DB_EXT_URL'] = test_db_ext_url
-
-
 @given('TEST_DB_URL: {test_db_url}')
 def override_xtesting_test_db_url(context, test_db_url):
     context.data['TEST_DB_URL'] = test_db_url
@@ -141,21 +137,26 @@ def add_percentage_rate(context, percentage_rate):
 @when('NFVbench API is ready')
 @when('NFVbench API is ready on host {host_ip}')
 @when('NFVbench API is ready on host {host_ip} and port {port:d}')
-def start_server(context, host_ip="127.0.0.1", port=7555):
-    context.host_ip = host_ip
-    context.port = port
+def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
+    # The NFVbench server host IP and port number have been set up from environment variables
+    # (see environment.py:before_all()).  Here we allow feature files to override them:
+    if host_ip is not None:
+        context.host_ip = host_ip
+    if port is not None:
+        context.port = port
+
     try:
         # check if API is already available
         requests.get(
             "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
     except RequestException:
         cmd = ["nfvbench", "-c", context.data['config'], "--server"]
-        if host_ip != "127.0.0.1":
+        if context.host_ip != "127.0.0.1":
             cmd.append("--host")
-            cmd.append(host_ip)
-        if port != 7555:
+            cmd.append(context.host_ip)
+        if context.port != 7555:
             cmd.append("--port")
-            cmd.append(port)
+            cmd.append(str(context.port))
 
         subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
 
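Note on the hunk above: context.port is handled as an integer (the {port:d} placeholder converts it and the default is the int 7555), so it must be converted with str() before being appended to the command, because subprocess argument lists only accept strings or path-like objects. A minimal standalone sketch of the probe-then-spawn pattern used here, with an illustrative helper name that is not part of steps.py:

    import subprocess
    from subprocess import DEVNULL

    import requests
    from requests.exceptions import RequestException

    def ensure_nfvbench_api(config_path, host_ip="127.0.0.1", port=7555):
        """Spawn an NFVbench server only if the REST API is not reachable yet."""
        url = "http://{host_ip}:{port}/status".format(host_ip=host_ip, port=port)
        try:
            requests.get(url)  # API already up: nothing to do
        except RequestException:
            cmd = ["nfvbench", "-c", config_path, "--server"]
            if host_ip != "127.0.0.1":
                cmd += ["--host", host_ip]
            if port != 7555:
                cmd += ["--port", str(port)]  # Popen arguments must be strings, not ints
            subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)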
@@ -196,7 +197,6 @@ def step_impl(context, repeat=1):
         results)
 
     total_tx_rate = extract_value(context.result, "total_tx_rate")
-    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
     overall = extract_value(context.result, "overall")
     avg_delay_usec = extract_value(overall, "avg_delay_usec")
     # create a synthesis with offered pps and latency values
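Note: the synthesis above relies on the extract_value() helper, which is defined elsewhere in steps.py and is not part of this diff. A hypothetical minimal sketch of such a helper, assuming it only guards against missing keys in the NFVbench result dictionary:

    def extract_value(obj, key):
        # Hypothetical illustration only, not the actual steps.py helper:
        # fail the step with a clear message when an expected key is missing,
        # otherwise return the corresponding value.
        assert key in obj, "missing key '{key}' in NFVbench result".format(key=key)
        return obj[key]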
@@ -227,6 +227,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run.  It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold.  This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
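Note: the new fixed-threshold step is meant to be used in feature files with a literal value, e.g. "Then verify latency result is lower than 150 microseconds" (150 is an arbitrary illustration, not a value taken from the repository); the {max_avg_latency_usec:g} placeholder hands the step a numeric threshold. A quick sketch of the failure message it raises, with arbitrary example values:

    # Arbitrary example values; only the message formatting is illustrated here.
    new_avg_latency_usec = 151.7   # measured average latency (usec)
    max_avg_latency_usec = 150.0   # threshold taken from the feature file (usec)
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),   # -> 152
                                 threshold=round(max_avg_latency_usec)))    # -> 150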
@@ -343,7 +373,7 @@ def throughput_comparison(context, old_throughput_pps=None, threshold=None, refe
                 threshold) * old_throughput_pps:
             raise AssertionError(
                 "Current run throughput {current_throughput_pps} is not over {threshold} "
-                " of previous value ({old_throughput_pps}pps)".format(
+                " of previous value ({old_throughput_pps})".format(
                     current_throughput_pps=Formatter.suffix('pps')(
                         Formatter.standard(current_throughput_pps)),
                     threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
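Note: the literal "pps" is dropped from the message template because old_throughput_pps is already rendered through Formatter.suffix('pps'), which (as its name suggests) appends the unit, so the previous template printed the suffix twice. An illustrative sketch of that higher-order formatter pattern, not the actual nfvbench.summarizer.Formatter code:

    def suffix(unit):
        # Return a formatter that appends the given unit to an already formatted number.
        return lambda formatted_value: "{value} {unit}".format(value=formatted_value, unit=unit)

    fmt_pps = suffix("pps")
    print(fmt_pps("1,500,000"))  # 1,500,000 pps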