import requests
+from requests import RequestException
import subprocess
from subprocess import DEVNULL
+from typing import Optional
from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str
    context.data['PROJECT_NAME'] = project_name
-@given('TEST_DB_EXT_URL: {test_db_ext_url}')
-def override_xtesting_test_db_ext_url(context, test_db_ext_url):
-    context.data['TEST_DB_EXT_URL'] = test_db_ext_url
-
-
@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
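
For illustration (not part of the patch): this step lets a feature file override the xtesting result DB URL with a line like `Given TEST_DB_URL: http://10.0.0.1:8000/api/v1/results`. The unannotated `{test_db_url}` field captures the whole remainder of the step text, colons in the URL included. A minimal sketch, assuming the `parse` library (a behave dependency) and a hypothetical URL:

```python
# Illustrative sketch of how the step pattern captures a full URL (not part of the patch).
import parse

matched = parse.parse('TEST_DB_URL: {test_db_url}',
                      'TEST_DB_URL: http://10.0.0.1:8000/api/v1/results')
assert matched['test_db_url'] == 'http://10.0.0.1:8000/api/v1/results'
```
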
@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
-def start_server(context, host_ip="127.0.0.1", port=7555):
-    context.host_ip = host_ip
-    context.port = port
+def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
+    # The NFVbench server host IP and port number have been set up from environment
+    # variables (see environment.py:before_all()). Here we allow feature files to
+    # override them:
+    if host_ip is not None:
+        context.host_ip = host_ip
+    if port is not None:
+        context.port = port
+
+
    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
-        if host_ip != "127.0.0.1":
+        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
-            cmd.append(host_ip)
-        if port != 7555:
+            cmd.append(context.host_ip)
+        if context.port != 7555:
            cmd.append("--port")
-            cmd.append(port)
+            cmd.append(str(context.port))
        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
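
For illustration (not part of the patch): the three `@when` patterns map onto the same function, and behave's parse-style matching fills only the fields present in the matched pattern, so the two shorter forms leave `host_ip` and `port` at `None` and the values stored by `environment.py:before_all()` win. A minimal sketch of the fullest pattern, using the `parse` library (a behave dependency) with a hypothetical host and port:

```python
# Sketch of behave's parse-style step matching (illustrative, not part of the patch).
import parse

pattern = 'NFVbench API is ready on host {host_ip} and port {port:d}'
matched = parse.parse(pattern, 'NFVbench API is ready on host 10.0.0.2 and port 8080')

assert matched['host_ip'] == '10.0.0.2'   # captured as a string
assert matched['port'] == 8080            # ":d" converts the capture to an int
```
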
    if last_result:
        compare_latency_values(context, last_result, threshold)
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check the latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run. It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold. This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the average latency that has just been measured (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare the measured value to the reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
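
Also for illustration: the `{max_avg_latency_usec:g}` field makes behave hand the step a numeric value, so the comparison above needs no explicit conversion. A minimal sketch with a hypothetical step text and a hypothetical measured latency:

```python
# Illustrative sketch (hypothetical values, not part of the patch).
import parse

step = parse.parse('verify latency result is lower than {max_avg_latency_usec:g} microseconds',
                   'verify latency result is lower than 120.5 microseconds')
max_avg_latency_usec = step['max_avg_latency_usec']
assert max_avg_latency_usec == 120.5      # ":g" converts the capture to a number

# With a measured average latency below the threshold, the check passes silently:
new_avg_latency_usec = 95.2               # e.g. context.synthesis['avg_delay_usec']
assert not new_avg_latency_usec > max_avg_latency_usec
```
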
@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,