behave_tests: log nfvbench traffic runs
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index f4dda58..314ae15 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -25,6 +25,7 @@ import json
 import requests
 import subprocess
 from subprocess import DEVNULL
+from typing import Optional
 
 from nfvbench.summarizer import Formatter
 from nfvbench.traffic_gen.traffic_utils import parse_rate_str
@@ -42,11 +43,6 @@ def override_xtesting_project_name(context, project_name):
     context.data['PROJECT_NAME'] = project_name
 
 
-@given('TEST_DB_EXT_URL: {test_db_ext_url}')
-def override_xtesting_test_db_ext_url(context, test_db_ext_url):
-    context.data['TEST_DB_EXT_URL'] = test_db_ext_url
-
-
 @given('TEST_DB_URL: {test_db_url}')
 def override_xtesting_test_db_url(context, test_db_url):
     context.data['TEST_DB_URL'] = test_db_url
@@ -141,21 +137,26 @@ def add_percentage_rate(context, percentage_rate):
 @when('NFVbench API is ready')
 @when('NFVbench API is ready on host {host_ip}')
 @when('NFVbench API is ready on host {host_ip} and port {port:d}')
-def start_server(context, host_ip="127.0.0.1", port=7555):
-    context.host_ip = host_ip
-    context.port = port
+def start_server(context, host_ip: Optional[str]=None, port: Optional[int]=None):
+    # The NFVbench server host IP and port number have been set up from environment variables
+    # (see environment.py:before_all()). Here we allow feature files to override them:
+    if host_ip is not None:
+        context.host_ip = host_ip
+    if port is not None:
+        context.port = port
+
     try:
         # check if API is already available
         requests.get(
             "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
     except RequestException:
         cmd = ["nfvbench", "-c", context.data['config'], "--server"]
-        if host_ip != "127.0.0.1":
+        if context.host_ip != "127.0.0.1":
             cmd.append("--host")
-            cmd.append(host_ip)
-        if port != 7555:
+            cmd.append(context.host_ip)
+        if context.port != 7555:
             cmd.append("--port")
-            cmd.append(port)
+            cmd.append(str(context.port))
 
         subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
 
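
Note (illustration, not part of this patch): the comment above relies on environment.py:before_all() seeding context.host_ip and context.port before any step runs. A minimal sketch of such a hook is shown below; the environment variable names are assumptions chosen for illustration and may not match the real hook.

    # Hypothetical sketch of behave_tests/features/environment.py -- the actual
    # hook and the environment variable names may differ.
    import os

    def before_all(context):
        # Assumed variable names; defaults mirror the values hard-coded above.
        context.host_ip = os.environ.get("NFVBENCH_SERVER_HOST", "127.0.0.1")
        context.port = int(os.environ.get("NFVBENCH_SERVER_PORT", "7555"))
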
@@ -167,7 +168,11 @@ def start_server(context, host_ip="127.0.0.1", port=7555):
 
 @then('run is started and waiting for result')
 @then('{repeat:d} runs are started and waiting for maximum result')
-def step_impl(context, repeat=1):
+def run_nfvbench_traffic(context, repeat=1):
+    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
+                        f"fc={context.json['flow_count']} "
+                        f"rate={context.json['rate']} repeat={repeat}")
+
     results = []
     if 'json' not in context.json:
         context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
@@ -189,20 +194,33 @@ def step_impl(context, repeat=1):
         results.append(result)
         assert result["status"] == STATUS_OK
 
+        # Log latest result:
+        total_tx_rate = extract_value(result, "total_tx_rate")
+        overall = extract_value(result, "overall")
+        avg_delay_usec = extract_value(overall, "avg_delay_usec")
+        context.logger.info(f"run_nfvbench_traffic: result #{i+1}: "
+                            f"total_tx_rate(pps)={total_tx_rate:,} "  # Add ',' thousand separator
+                            f"avg_latency_usec={round(avg_delay_usec)}")
 
+    # Keep only the result with the highest rate:
     context.result = reduce(
         lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y,
                                                                             "total_tx_rate") else y,
         results)
 
     total_tx_rate = extract_value(context.result, "total_tx_rate")
-    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
     overall = extract_value(context.result, "overall")
     avg_delay_usec = extract_value(overall, "avg_delay_usec")
     # create a synthesis with offered pps and latency values
     context.synthesis['total_tx_rate'] = total_tx_rate
     context.synthesis['avg_delay_usec'] = avg_delay_usec
 
+    # Log the max result only when two or more nfvbench runs were performed:
+    if repeat > 1:
+        context.logger.info(f"run_nfvbench_traffic: max result: "
+                            f"total_tx_rate(pps)={total_tx_rate:,} "
+                            f"avg_latency_usec={round(avg_delay_usec)}")
+
 
 @then('extract offered rate result')
 def save_rate_result(context):
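
Note (illustration, not part of this patch): the reduce() in the hunk above keeps the run with the highest total_tx_rate across repeated runs. An equivalent standalone selection using max(), on simplified flat result dictionaries (real results are nested and read through extract_value()):

    # Keep only the result with the highest offered rate, sketched with dummy data.
    results = [{"total_tx_rate": 1_200_000}, {"total_tx_rate": 1_350_000}]
    best = max(results, key=lambda r: r["total_tx_rate"])
    assert best["total_tx_rate"] == 1_350_000
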
@@ -227,6 +245,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run.  It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold.  This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
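
Note (illustration, not part of this patch): behave's default step matching is built on the parse package, so the {max_avg_latency_usec:g} field of the new step is handed to the step function as a number rather than a string. A quick sketch of that conversion, using parse directly with an example step text:

    import parse  # behave's step matcher is built on this package

    result = parse.parse(
        'verify latency result is lower than {max_avg_latency_usec:g} microseconds',
        'verify latency result is lower than 150.5 microseconds')
    assert result['max_avg_latency_usec'] == 150.5  # converted to a float by the :g type
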
@@ -343,7 +391,7 @@ def throughput_comparison(context, old_throughput_pps=None, threshold=None, refe
                 threshold) * old_throughput_pps:
             raise AssertionError(
                 "Current run throughput {current_throughput_pps} is not over {threshold} "
-                " of previous value ({old_throughput_pps}pps)".format(
+                " of previous value ({old_throughput_pps})".format(
                     current_throughput_pps=Formatter.suffix('pps')(
                         Formatter.standard(current_throughput_pps)),
                     threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
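
Note (illustration, not part of this patch): the last hunk drops the literal "pps" from the message template because Formatter.suffix('pps') already appends the unit to the value rendered by Formatter.standard(), so the old message printed the unit twice. A small sketch of that call chain; the exact number formatting is defined in nfvbench/summarizer.py and is not shown in this diff:

    from nfvbench.summarizer import Formatter

    # Formatter.suffix('pps') returns a callable that appends the 'pps' unit
    # to the already-formatted number produced by Formatter.standard().
    formatted = Formatter.suffix('pps')(Formatter.standard(1_234_567))
    # 'formatted' ends in 'pps', hence the redundant literal was removed.
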