# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry
import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate
    context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()). Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    nfvbench_test_url = "http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port)
    try:
        # Check whether the API is already available:
        requests.get(nfvbench_test_url)
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    context.logger.info("start_server: test nfvbench API: " + nfvbench_test_url)
    test_nfvbench_api(nfvbench_test_url)
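

# For illustration (hypothetical values): with a config file /etc/nfvbench/nfvbench.cfg,
# host_ip="192.168.0.10" and port=8000, the command assembled above would be:
#   nfvbench -c /etc/nfvbench/nfvbench.cfg --server --host 192.168.0.10 --port 8000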


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def run_nfvbench_traffic(context, repeat=1):
    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
                        f"fc={context.json['flow_count']} "
                        f"rate={context.json['rate']} repeat={repeat}")

    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']

    max_total_tx_rate = None
    # Rem: don't init with 0 in case nfvbench gets crazy and returns a negative packet rate.

    for i in range(repeat):
        # str.strip('.json') would strip any of the characters '.', 'j', 's', 'o', 'n'
        # from both ends of the name, not the extension, so slice the suffix off instead:
        context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'

        # Start nfvbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        assert r.status_code == 200
        context.request_id = json.loads(r.text)["request_id"]
        result = wait_result(context)
        assert result["status"] == STATUS_OK

        # Extract useful metrics from the result:
        total_tx_rate = extract_value(result, "total_tx_rate")
        overall = extract_value(result, "overall")
        avg_delay_usec = extract_value(overall, "avg_delay_usec")

        context.logger.info(f"run_nfvbench_traffic: result #{i+1}: "
                            f"total_tx_rate(pps)={total_tx_rate:,} "  # ',' thousands separator
                            f"avg_latency_usec={round(avg_delay_usec)}")

        # Keep only the result with the highest packet rate:
        if max_total_tx_rate is None or total_tx_rate > max_total_tx_rate:
            max_total_tx_rate = total_tx_rate
            context.result = result
            context.synthesis['total_tx_rate'] = total_tx_rate
            context.synthesis['avg_delay_usec'] = avg_delay_usec

    # Log the max result only when we did two nfvbench runs or more:
    if repeat > 1:
        context.logger.info(f"run_nfvbench_traffic: max result: "
                            f"total_tx_rate(pps)={context.synthesis['total_tx_rate']:,} "
                            f"avg_latency_usec={round(context.synthesis['avg_delay_usec'])}")


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
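

# For illustration (hypothetical values): a run with frame size "1518" and flow
# count "100k" is stored under the key '1518_100k' in context.rates, where
# percentage_previous_rate() later looks it up.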


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)
    if last_result is None:
        raise AssertionError("No previous result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)
    if last_result is None:
        raise AssertionError("No previous result found.")
    compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run. It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold. This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Log what we test:
    context.logger.info("check_latency_result_against_fixed_threshold(usec): "
                        "{value}<={ref}?".format(
                            value=round(new_avg_latency_usec),
                            ref=round(max_avg_latency_usec)))

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{value} usec > {ref} usec".format(
                                 value=round(new_avg_latency_usec),
                                 ref=round(max_avg_latency_usec)))


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if last_result is None:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if last_result is None:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the input rate value with the percentage one to avoid a missed
        # match when the pps value is not accurate compared to the previous run:
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)
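

# Note: with tries=24 and delay=5.0 below, test_nfvbench_api keeps polling the
# status URL for up to 24 * 5 = 120 seconds before giving up.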
@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(nfvbench_test_url: str):
    try:
        r = requests.get(nfvbench_test_url)
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc
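

# Note: wait_result below polls every 2 seconds for at most 1000 tries, i.e. it
# waits up to 1000 * 2 = 2000 seconds (about 33 minutes) for a run to complete or fail.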
@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)
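

# For illustration (hypothetical values): if a previous run stored
# context.rates['1518_100k'] = 1000000, then
# percentage_previous_rate(context, '75%') returns '750000pps'.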


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be > 0%% and <= 100%%)' % percentage)
    return float_percent / 100
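

# Example: convert_percentage_str_to_float('90%') returns 0.9.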


def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of the previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))
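

# For illustration: parse_rate_str('1Mpps') yields a 'rate_pps' entry equal to
# 1000000 (hence the int(...) casts above), so a '[0.9Mpps, 1.1Mpps]' range step
# checks that the measured rate falls between 900000 and 1100000 pps.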


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "the previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))
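

# For illustration: with threshold='90%', the check above allows the current
# latency to be at most (2 - 0.9) = 1.1 times the old latency, i.e. up to 10%
# worse; the error message reports this bound as 110% (200 - 90).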


def get_result_from_input_values(input_values, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios):
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result
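

# For illustration (hypothetical values): an input of
#   {'duration_sec': '30', 'frame_sizes': ['64'], 'flow_count': '128k', 'rate': 'ndr'}
# matches a database result carrying the same four required keys, regardless of
# any extra keys present on either side.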


def extract_value(obj, key):
    """Pull the first value of the specified key found in nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]
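

# Example: extract_value({"result": {"overall": {"avg_delay_usec": 42.0}}},
#                        "avg_delay_usec") returns 42.0.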


def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # Pass 'reference' through explicitly: a positional call would bind the
        # page number to the 'reference' parameter.
        return get_last_result(context, reference, page)
    return None
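

# For illustration (hypothetical values): with PROJECT_NAME='nfvbench',
# a characterization lookup, INSTALLER_TYPE='unknown' and NODE_NAME='pod1',
# the query URL built above looks like:
#   <TEST_DB_URL>?project=nfvbench&case=characterization&installer=unknown&pod=pod1&criteria=PASS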