from behave import given
from behave import when
from behave import then
+from copy import deepcopy
from requests import RequestException
from retry import retry
import json
from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str
+from testapi import TestapiClient, nfvbench_input_to_str
+
+
STATUS_ERROR = "ERROR"
STATUS_OK = "OK"
context.percentage_rate = percentage_rate
rate = percentage_previous_rate(context, percentage_rate)
context.json['rate'] = rate
+ context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")
+
+
+@given('packet rate equal to {percentage} of max throughput of last characterization')
+def add_packet_rate(context, percentage: str):
+ """Update nfvbench run config with packet rate based on reference value.
+
+ For the already configured frame size and flow count, retrieve the max
+ throughput obtained during the latest successful characterization run. Then
+ retain `percentage` of this value for the packet rate and update `context`.
+
+ Args:
+ context: The context data of the current scenario run. It includes the
+ testapi endpoints to retrieve the reference values.
+
+ percentage: String representation of the percentage of the reference max
+ throughput. Example: "70%"
+
+ Updates context:
+ context.percentage_rate: percentage of reference max throughput
+ using a string representation. Example: "70%"
+
+ context.json['rate']: packet rate in packets per second using a string
+ representation. Example: "2000pps"
+
+ Raises:
+ ValueError: invalid percentage string
+
+ AssertionError: cannot find reference throughput value
+
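+ Feature file example (the "70%" value is illustrative):
+
+ Given packet rate equal to 70% of max throughput of last characterization
+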
+ """
+ # Validate percentage
+ if not percentage.endswith('%'):
+ raise ValueError('Invalid percentage string: "{0}"'.format(percentage))
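+ # convert_percentage_str_to_float is expected to turn e.g. "70%" into 0.7,
+ # which is then used below as a multiplier on the reference throughput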
+ percentage_float = convert_percentage_str_to_float(percentage)
+
+ # Retrieve nfvbench results report from testapi for:
+ # - the latest throughput scenario inside a characterization feature that passed
+ # - the test duration, frame size and flow count given in context.json
+ # - (optionally) the user_label and flavor_type given in context.json
+ # - the 'ndr' rate
+ testapi_params = {"project_name": context.data['PROJECT_NAME'],
+ "case_name": "characterization"}
+ nfvbench_test_conditions = deepcopy(context.json)
+ nfvbench_test_conditions['rate'] = 'ndr'
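+ # Illustrative query (actual values come from context):
+ #   testapi_params: {"project_name": "nfvbench", "case_name": "characterization"}
+ #   nfvbench_test_conditions: {'frame_sizes': ['64'], 'flow_count': '100k',
+ #                              'duration_sec': '10', 'rate': 'ndr', ...}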
+ testapi_client = TestapiClient(testapi_url=context.data['TEST_DB_URL'],
+ logger=context.logger)
+ last_result = testapi_client.find_last_result(testapi_params,
+ scenario_tag="throughput",
+ nfvbench_test_input=nfvbench_test_conditions)
+ if last_result is None:
+ error_msg = "No characterization result found for scenario_tag=throughput"
+ error_msg += " and nfvbench test conditions "
+ error_msg += nfvbench_input_to_str(nfvbench_test_conditions)
+ raise AssertionError(error_msg)
+
+ # From the results report, extract the max throughput in packets per second
+ total_tx_rate = extract_value(last_result["output"], "total_tx_rate")
+ context.logger.info("add_packet_rate: max throughput of last characterization (pps): "
+ f"{total_tx_rate:,}")
+
+ # Compute the desired packet rate
+ rate = round(total_tx_rate * percentage_float)
+ context.logger.info(f"add_packet_rate: percentage={percentage} rate(pps)={rate:,}")
+
+ # Build rate string using a representation understood by nfvbench
+ rate_str = str(rate) + "pps"
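+ # e.g. total_tx_rate=14880000 pps and percentage="70%" give rate_str="10416000pps"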
+
+ # Update context
+ context.percentage_rate = percentage
+ context.json['rate'] = rate_str
"""When steps."""
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str]=None, port: Optional[int]=None):
+ """Start nfvbench server if needed and wait until it is ready.
+
+ Quickly check whether the nfvbench HTTP server is ready by reading the
+ "/status" page. If not, start the server locally. Then wait until the
+ nfvbench API is ready by polling the "/status" page.
+
+ This code is useful when behave and nfvbench run on the same machine. In
+ particular, it is needed to run behave tests with the nfvbench Docker container.
+
+ There is currently no way to prevent behave from automatically starting the
+ nfvbench server when this is not desirable, for instance when behave is
+ started using ansible-role-nfvbench. In that case, the user or the
+ orchestration layer should make sure the nfvbench API is ready before starting
+ the behave tests.
+
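+ Feature file examples (host and port values are illustrative):
+
+ When NFVbench API is ready on host 127.0.0.1
+ When NFVbench API is ready on host 127.0.0.1 and port 7556
+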
+ """
# NFVbench server host IP and port number have been set up from environment variables (see
# environment.py:before_all()). Here we allow overriding them from feature files:
if host_ip is not None:
if port is not None:
context.port = port
+ nfvbench_test_url = "http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port)
+ context.logger.info("start_server: test nfvbench API on URL: " + nfvbench_test_url)
+
try:
# check if API is already available
- requests.get(
- "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
+ requests.get(nfvbench_test_url)
except RequestException:
+ context.logger.info("nfvbench server not running")
+
cmd = ["nfvbench", "-c", context.data['config'], "--server"]
if context.host_ip != "127.0.0.1":
cmd.append("--host")
cmd.append("--port")
cmd.append(str(context.port))
+ context.logger.info("Start nfvbench server with command: " + " ".join(cmd))
+
subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
- test_nfvbench_api(context)
+ # Wait until nfvbench API is ready
+ test_nfvbench_api(nfvbench_test_url)
"""Then steps."""
f"rate={context.json['rate']} repeat={repeat}")
if 'json' not in context.json:
+ # Build filename for nfvbench results in JSON format
context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
- '/nfvbench-' + context.tag + '-fs_' + \
- context.json['frame_sizes'][0] + '-fc_' + \
- context.json['flow_count'] + '-rate_' + \
- context.json['rate'] + '.json'
+ '/nfvbench-' + context.tag + \
+ '-fs_' + context.json['frame_sizes'][0] + \
+ '-fc_' + context.json['flow_count']
+ if context.percentage_rate is not None:
+ # Add rate as a percentage, eg '-rate_70%'
+ context.json['json'] += '-rate_' + context.percentage_rate
+ else:
+ # Add rate in bits or packets per second, eg '-rate_15Gbps' or '-rate_10kpps'
+ context.json['json'] += '-rate_' + context.json['rate']
+ context.json['json'] += '.json'
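+ # Illustrative result (case name, tag, frame size, flow count and rate depend on the scenario):
+ # /var/lib/xtesting/results/<case_name>/nfvbench-<tag>-fs_64-fc_100k-rate_70%.json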
+
json_base_name = context.json['json']
max_total_tx_rate = None
# Get the just measured average latency (a float):
new_avg_latency_usec = context.synthesis['avg_delay_usec']
+ # Log what we test:
+ context.logger.info("check_latency_result_against_fixed_threshold(usec): "
+ "{value}<={ref}?".format(
+ value=round(new_avg_latency_usec),
+ ref=round(max_avg_latency_usec)))
+
# Compare measured value to reference:
if new_avg_latency_usec > max_avg_latency_usec:
raise AssertionError("Average latency higher than max threshold: "
- "{avg_latency} usec > {threshold} usec".format(
- avg_latency=round(new_avg_latency_usec),
- threshold=round(max_avg_latency_usec)))
+ "{value} usec > {ref} usec".format(
+ value=round(new_avg_latency_usec),
+ ref=round(max_avg_latency_usec)))
@then(
@retry(AssertionError, tries=24, delay=5.0, logger=None)
-def test_nfvbench_api(context):
+def test_nfvbench_api(nfvbench_test_url: str):
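+ """Poll the nfvbench "/status" page until the API is ready.
+
+ With the @retry decorator above, this waits up to ~2 minutes (24 tries, 5 s delay).
+ """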
try:
- r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
+ r = requests.get(nfvbench_test_url)
assert r.status_code == 200
assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
except RequestException as exc:
def get_result_from_input_values(input, result):
+ """Check test conditions in scenario results input.
+
+ Check whether the input parameters of a behave scenario results record from
+ testapi match the input parameters of the latest test. In other words,
+ check that the test results from testapi come from a test done under the
+ same conditions (frame size, flow count, rate, ...).
+
+ Args:
+ input: input dict of a results dict of a behave scenario from testapi
+
+ result: dict of nfvbench params used during the last test
+
+ Returns:
+ True if test conditions match, else False.
+
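+ Example (illustrative): an input dict containing {'frame_sizes': ['64'],
+ 'flow_count': '100k', 'duration_sec': '10', 'rate': 'ndr'} matches a result
+ dict holding the same values for these required keys.
+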
+ """
# Select required keys (other keys may be unset or inconsistent between scenarios)
required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
if 'user_label' in result:
return tagged_result
if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
page = last_results["pagination"]["current_page"] + 1
- return get_last_result(context, page)
+ return get_last_result(context, reference, page)
return None