# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


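# Make sure the NFVbench server is running and its REST API is reachable,
# starting the server as a subprocess if needed.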
@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()). Here they can be overridden from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))
        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


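# Start one or more NFVbench runs through the REST API, wait for each result and keep
# the run with the highest total transmit rate as the scenario result.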
@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def run_nfvbench_traffic(context, repeat=1):
    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
                        f"fc={context.json['flow_count']} "
                        f"rate={context.json['rate']} repeat={repeat}")

    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']

    max_total_tx_rate = None
    # rem: don't init with 0 in case nfvbench gets crazy and returns a negative packet rate

    for i in range(repeat):
        if repeat > 1:
            context.json['json'] = json_base_name.strip('.json') + '-' + str(i) + '.json'

        # Start nfvbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        assert result["status"] == STATUS_OK

        # Extract useful metrics from the result:
        total_tx_rate = extract_value(result, "total_tx_rate")
        overall = extract_value(result, "overall")
        avg_delay_usec = extract_value(overall, "avg_delay_usec")

        context.logger.info(f"run_nfvbench_traffic: result #{i+1}: "
                            f"total_tx_rate(pps)={total_tx_rate:,} "  # Add ',' thousand separator
                            f"avg_latency_usec={round(avg_delay_usec)}")

        # Keep only the result with the highest packet rate:
        if max_total_tx_rate is None or total_tx_rate > max_total_tx_rate:
            max_total_tx_rate = total_tx_rate
            context.result = result
            context.synthesis['total_tx_rate'] = total_tx_rate
            context.synthesis['avg_delay_usec'] = avg_delay_usec

    # Log max result only when we did two nfvbench runs or more:
    if repeat > 1:
        context.logger.info(f"run_nfvbench_traffic: max result: "
                            f"total_tx_rate(pps)={context.synthesis['total_tx_rate']:,} "
                            f"avg_latency_usec={round(context.synthesis['avg_delay_usec'])}")


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run. It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold. This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


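# Record the current run result under its tag so that all scenario results can later be
# pushed to the test database.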
@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the input rate value with the percentage one to avoid a mismatch
        # if the absolute pps value differs slightly from the previous run:
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


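# Poll the NFVbench server status endpoint (with retries) until the API answers
# and reports that no run is pending.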
@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc


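# Poll the NFVbench server until the current run has completed (status OK or ERROR).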
@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


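# Compute an absolute pps rate from a percentage of the offered rate previously measured
# for the same frame size and flow count.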
def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be 1-100%%)' % percentage)
    return float_percent / 100


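# Check that the current throughput is at least `threshold` percent of the throughput
# stored in the reference result retrieved from the test database.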
def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


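# Check that the current average latency does not degrade by more than the allowed
# margin compared to the latency stored in the reference result.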
def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


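# Compare the current throughput either to a percentage of a previous value
# (old_throughput_pps and threshold) or to an absolute [min, max] reference range.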
def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


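# Compare the current average latency either to a previous value (allowing the degradation
# margin derived from threshold) or to an absolute [min, max] reference range in usec.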
def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


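# Return True when the recorded result matches the current scenario input on the keys
# that identify a test case (duration, frame size, flow count, rate, and optional labels).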
def get_result_from_input_values(input, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input[k]) for k in required_keys if k in input)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)

    return results[0]


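# Fetch from the xtesting test database the most recent result whose input matches the
# current scenario, following result pages until a match is found.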
def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        return get_last_result(context, reference, page)
    return None