# under the License.
#
-from functools import reduce
-
from behave import given
from behave import when
from behave import then
import requests
import subprocess
from subprocess import DEVNULL
+from typing import Optional
from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str
    context.data['PROJECT_NAME'] = project_name
-@given('TEST_DB_EXT_URL: {test_db_ext_url}')
-def override_xtesting_test_db_ext_url(context, test_db_ext_url):
-    context.data['TEST_DB_EXT_URL'] = test_db_ext_url
-
-
@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
-def start_server(context, host_ip="127.0.0.1", port=7555):
-    context.host_ip = host_ip
-    context.port = port
+def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
+    # The NFVbench server host IP and port number have been set up from environment
+    # variables (see environment.py:before_all()). Here we allow overriding them
+    # from feature files:
+    if host_ip is not None:
+        context.host_ip = host_ip
+    if port is not None:
+        context.port = port
+
    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
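+        # API not available yet: start the NFVbench server in the background,
+        # passing host and port options only when they differ from the defaults.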
cmd = ["nfvbench", "-c", context.data['config'], "--server"]
- if host_ip != "127.0.0.1":
+ if context.host_ip != "127.0.0.1":
cmd.append("--host")
- cmd.append(host_ip)
- if port != 7555:
+ cmd.append(context.host_ip)
+ if context.port != 7555:
cmd.append("--port")
- cmd.append(port)
+ cmd.append(str(context.port))
subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
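+
+# Example of the "NFVbench API is ready" steps in a feature file (host IP and
+# port number are hypothetical values):
+#
+#   When NFVbench API is ready
+#   When NFVbench API is ready on host 192.168.120.2
+#   When NFVbench API is ready on host 192.168.120.2 and port 7556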
@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
-def step_impl(context, repeat=1):
-    results = []
+def run_nfvbench_traffic(context, repeat=1):
+    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
+                        f"fc={context.json['flow_count']} "
+                        f"rate={context.json['rate']} repeat={repeat}")
+
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
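+    # The resulting file name looks like (frame size, flow count and rate are
+    # hypothetical values):
+    # /var/lib/xtesting/results/<CASE_NAME>/nfvbench-<tag>-fs_64-fc_100k-rate_ndr.json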
+
+    max_total_tx_rate = None
+    # Note: do not initialize to 0, in case nfvbench misbehaves and returns a
+    # negative packet rate.
+
    for i in range(repeat):
        if repeat > 1:
-            context.json['json'] = json_base_name.strip('.json') + '-' + str(i) + '.json'
+            # Use removesuffix() (Python >= 3.9): str.strip('.json') would strip
+            # any of those characters from both ends, not just the extension.
+            context.json['json'] = json_base_name.removesuffix('.json') + '-' + str(i) + '.json'
+        # Start nfvbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
-        results.append(result)
        assert result["status"] == STATUS_OK
+        # Extract useful metrics from the result:
+        total_tx_rate = extract_value(result, "total_tx_rate")
+        overall = extract_value(result, "overall")
+        avg_delay_usec = extract_value(overall, "avg_delay_usec")
-    context.result = reduce(
-        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y,
-                                                                            "total_tx_rate") else y,
-        results)
+        # Log the latest result:
+        context.logger.info(f"run_nfvbench_traffic: result #{i+1}: "
+                            f"total_tx_rate(pps)={total_tx_rate:,} "  # ',' adds a thousands separator
+                            f"avg_latency_usec={round(avg_delay_usec)}")
-    total_tx_rate = extract_value(context.result, "total_tx_rate")
-    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
-    overall = extract_value(context.result, "overall")
-    avg_delay_usec = extract_value(overall, "avg_delay_usec")
-    # create a synthesis with offered pps and latency values
-    context.synthesis['total_tx_rate'] = total_tx_rate
-    context.synthesis['avg_delay_usec'] = avg_delay_usec
+        # Keep only the result with the highest packet rate:
+        if max_total_tx_rate is None or total_tx_rate > max_total_tx_rate:
+            max_total_tx_rate = total_tx_rate
+            context.result = result
+            context.synthesis['total_tx_rate'] = total_tx_rate
+            context.synthesis['avg_delay_usec'] = avg_delay_usec
+
+    # Log the max result only when two or more nfvbench runs were performed:
+    if repeat > 1:
+        context.logger.info(f"run_nfvbench_traffic: max result: "
+                            f"total_tx_rate(pps)={context.synthesis['total_tx_rate']:,} "
+                            f"avg_latency_usec={round(context.synthesis['avg_delay_usec'])}")
@then('extract offered rate result')
    if last_result:
        compare_latency_values(context, last_result, threshold)
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check the latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run. It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold. This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
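+    Example:
+        Typical use in a feature file (the threshold value is hypothetical):
+
+            Then verify latency result is lower than 150 microseconds
+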
+ """
+    # Get the just-measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare the measured value to the reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             f"{round(new_avg_latency_usec)} usec > "
+                             f"{round(max_avg_latency_usec)} usec")
+
+
@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,