# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from behave import given
from behave import when
from behave import then
from copy import deepcopy
from requests import RequestException
from retry import retry
import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

from testapi import TestapiClient, nfvbench_input_to_str

STATUS_OK = "OK"
STATUS_ERROR = "ERROR"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')
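
# Note: BASE_TEST_DB_URL drops the trailing "results" path segment, e.g. (hypothetical URL)
# "http://testapi.example:8000/api/v1/results" becomes "http://testapi.example:8000/api/v1/".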


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate
    context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")
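
# Illustration with assumed figures: if the previous scenario measured 1_000_000 pps and
# percentage_rate is "70%", percentage_previous_rate() returns "700000pps".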


@given('packet rate equal to {percentage} of max throughput of last characterization')
def add_packet_rate(context, percentage: str):
    """Update nfvbench run config with packet rate based on reference value.

    For the already configured frame size and flow count, retrieve the max
    throughput obtained during the latest successful characterization run. Then
    retain `percentage` of this value for the packet rate and update `context`.

    Args:
        context: The context data of the current scenario run. It includes the
            testapi endpoints to retrieve the reference values.

        percentage: String representation of the percentage of the reference max
            throughput. Example: "70%"

    Updates context:
        context.percentage_rate: percentage of reference max throughput
            using a string representation. Example: "70%"

        context.json['rate']: packet rate in packets per second using a string
            representation. Example: "2000pps"

    Raises:
        ValueError: invalid percentage string

        AssertionError: cannot find reference throughput value

    """
    # Validate percentage
    if not percentage.endswith('%'):
        raise ValueError('Invalid percentage string: "{0}"'.format(percentage))
    percentage_float = convert_percentage_str_to_float(percentage)

    # Retrieve nfvbench results report from testapi for:
    # - the latest throughput scenario inside a characterization feature that passed
    # - the test duration, frame size and flow count given in context.json
    # - (optionally) the user_label and flavor_type given in context.json
    testapi_params = {"project_name": context.data['PROJECT_NAME'],
                      "case_name": "characterization"}
    nfvbench_test_conditions = deepcopy(context.json)
    nfvbench_test_conditions['rate'] = 'ndr'
    testapi_client = TestapiClient(testapi_url=context.data['TEST_DB_URL'],
                                   logger=context.logger)
    last_result = testapi_client.find_last_result(testapi_params,
                                                  scenario_tag="throughput",
                                                  nfvbench_test_input=nfvbench_test_conditions)
    if last_result is None:
        error_msg = "No characterization result found for scenario_tag=throughput"
        error_msg += " and nfvbench test conditions "
        error_msg += nfvbench_input_to_str(nfvbench_test_conditions)
        raise AssertionError(error_msg)

    # From the results report, extract the max throughput in packets per second
    total_tx_rate = extract_value(last_result["output"], "total_tx_rate")
    context.logger.info("add_packet_rate: max throughput of last characterization (pps): "
                        f"{total_tx_rate:,}")

    # Compute the desired packet rate
    rate = round(total_tx_rate * percentage_float)
    context.logger.info(f"add_packet_rate: percentage={percentage} rate(pps)={rate:,}")

    # Build rate string using a representation understood by nfvbench
    rate_str = str(rate) + "pps"

    # Update the test conditions for the current scenario run
    context.percentage_rate = percentage
    context.json['rate'] = rate_str
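
# Example feature file step matching this definition (the percentage is illustrative):
#   Given packet rate equal to 70% of max throughput of last characterization
# With an assumed reference max throughput of 2_857_143 pps, context.json['rate']
# would be set to "2000000pps".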


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    """Start nfvbench server if needed and wait until it is ready.

    Quickly check whether nfvbench HTTP server is ready by reading the "/status"
    page. If not, start the server locally. Then wait until nfvbench API is
    ready by polling the "/status" page.

    This code is useful when behave and nfvbench run on the same machine. In
    particular, it is needed to run behave tests with the nfvbench Docker container.

    There is currently no way to prevent behave from automatically starting the
    nfvbench server when this is not desirable, for instance when behave is
    started using ansible-role-nfvbench. The user or the orchestration layer
    should make sure nfvbench API is ready before starting behave tests.
    """
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()). Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    nfvbench_test_url = "http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port)
    context.logger.info("start_server: test nfvbench API on URL: " + nfvbench_test_url)

    try:
        # Check if API is already available
        requests.get(nfvbench_test_url)
    except RequestException:
        context.logger.info("nfvbench server not running")

        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))

        context.logger.info("Start nfvbench server with command: " + " ".join(cmd))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    # Wait until nfvbench API is ready
    test_nfvbench_api(nfvbench_test_url)


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def run_nfvbench_traffic(context, repeat=1):
    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
                        f"fc={context.json['flow_count']} "
                        f"rate={context.json['rate']} repeat={repeat}")

    if 'json' not in context.json:
        # Build the report file name from the test conditions
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']

    max_total_tx_rate = None
    # rem: don't init with 0 in case nfvbench gets crazy and returns a negative packet rate

    for i in range(repeat):
        # Insert the iteration number in the report file name, before the ".json" extension
        # (str.strip('.json') would remove any of those characters from both ends)
        context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'

        # Start nfvbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        assert result["status"] == STATUS_OK

        # Extract useful metrics from the result:
        total_tx_rate = extract_value(result, "total_tx_rate")
        overall = extract_value(result, "overall")
        avg_delay_usec = extract_value(overall, "avg_delay_usec")

        # Log the result of this run:
        context.logger.info(f"run_nfvbench_traffic: result #{i+1}: "
                            f"total_tx_rate(pps)={total_tx_rate:,} "  # Add ',' thousand separator
                            f"avg_latency_usec={round(avg_delay_usec)}")

        # Keep only the result with the highest packet rate:
        if max_total_tx_rate is None or total_tx_rate > max_total_tx_rate:
            max_total_tx_rate = total_tx_rate
            context.result = result
            context.synthesis['total_tx_rate'] = total_tx_rate
            context.synthesis['avg_delay_usec'] = avg_delay_usec

    # Log the max result only when we did two nfvbench runs or more:
    if repeat > 1:
        context.logger.info(f"run_nfvbench_traffic: max result: "
                            f"total_tx_rate(pps)={context.synthesis['total_tx_rate']:,} "
                            f"avg_latency_usec={round(context.synthesis['avg_delay_usec'])}")


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
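
# context.rates is keyed by "<frame_size>_<flow_count>", e.g. "64_128" (illustrative values),
# so that percentage_previous_rate() can later look up the rate measured under the same
# test conditions.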


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)
    if last_result is None:
        raise AssertionError("No previous result found in database.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)
    if last_result is None:
        raise AssertionError("No previous result found in database.")
    compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run. It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold. This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Log what is being checked:
    context.logger.info("check_latency_result_against_fixed_threshold(usec): "
                        "{value}<={ref}?".format(
                            value=round(new_avg_latency_usec),
                            ref=round(max_avg_latency_usec)))

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{value} usec > {ref} usec".format(
                                 value=round(new_avg_latency_usec),
                                 ref=round(max_avg_latency_usec)))
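
# Example feature file step (the threshold value is illustrative):
#   Then verify latency result is lower than 120 microseconds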


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if last_result is None:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if last_result is None:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the input rate value with the percentage one to avoid a "no match"
        # when the pps value differs slightly from the one of the previous run
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)
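
# context.results maps a scenario tag to the list of per-run results gathered so far, e.g.
# {"throughput": [json_result, ...], "latency": [...]}; the actual upload to the test
# database is assumed to happen elsewhere in the surrounding test machinery.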


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(nfvbench_test_url: str):
    try:
        r = requests.get(nfvbench_test_url)
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Fail to access NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be 1-100%%)' % percentage)
    return float_percent / 100
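
# Worked example: convert_percentage_str_to_float("70%") returns 0.7, while
# "0%" or "150%" raise an exception.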


def compare_throughput_values(context, last_result, threshold):
    # Make sure the current and previous runs have the same status
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    # Make sure the current and previous runs have the same status
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold,
                    old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))
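
# Threshold example with illustrative numbers: with threshold="90%" and a previous throughput
# of 1_000_000 pps, any current throughput below 900_000 pps raises an AssertionError.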


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))
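
# Note on the threshold formula: since lower latency is better, a threshold of "90%" allows the
# current latency to be at most (2 - 0.9) = 1.1 times the previous value, i.e. up to 10% worse;
# the error message reports this bound as "110%".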


def get_result_from_input_values(input, result):
    """Check test conditions in scenario results input.

    Check whether the input parameters of a behave scenario results record from
    testapi match the input parameters of the latest test. In other words,
    check that the test results from testapi come from a test done under the
    same conditions (frame size, flow count, rate, ...)

    Args:
        input: input dict of a results dict of a behave scenario from testapi

        result: dict of nfvbench params used during the last test

    Returns:
        True if test conditions match, else False.

    """
    # Select required keys (other keys can be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input[k]) for k in required_keys if k in input)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]
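
# Worked example with an assumed input: extract_value({"result": {"total_tx_rate": 100}},
# "total_tx_rate") returns 100; only the first value found in the tree is returned.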


def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        return get_last_result(context, reference, page)
    return None
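
# Pagination note: testapi returns results page by page; when no matching result is found on
# the current page, the function recursively requests the next page, and finally returns None
# if nothing matches.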