# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"
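
# rem: STATUS_OK / STATUS_ERROR are expected to match the "status" strings returned
# by the NFVbench server in its JSON results.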


"""Given steps."""


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    # Keep the API root too (TEST_DB_URL without the trailing 'results' endpoint):
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate
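
# Example feature-file usage of the "given" steps above (hypothetical values):
#   Given 64 frame size
#   And 100k flow count
#   And ndr rate
#   And 10 sec run duration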


"""When steps."""


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    # The NFVbench server host IP and port number have been set up from environment variables
    # (see environment.py:before_all()).  Here we allow the feature files to override them:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    nfvbench_test_url = "http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port)
    try:
        # Check whether the API is already available:
        requests.get(nfvbench_test_url)
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    context.logger.info("start_server: test nfvbench API: " + nfvbench_test_url)
    test_nfvbench_api(nfvbench_test_url)
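
# Example feature-file usage (hypothetical host and port):
#   When NFVbench API is ready on host 192.168.120.10 and port 7556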


"""Then steps."""


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def run_nfvbench_traffic(context, repeat=1):
    context.logger.info(f"run_nfvbench_traffic: fs={context.json['frame_sizes'][0]} "
                        f"fc={context.json['flow_count']} "
                        f"rate={context.json['rate']} repeat={repeat}")

    if 'json' not in context.json:
        # Build a default results file name (one json file per nfvbench run):
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']

    max_total_tx_rate = None
    # rem: don't init with 0 in case nfvbench gets crazy and returns a negative packet rate

    for i in range(repeat):
        if repeat > 1:
            # rem: removesuffix() (Python >= 3.9) rather than strip('.json'), which removes
            # a *set* of characters and could truncate the base name:
            context.json['json'] = json_base_name.removesuffix('.json') + '-' + str(i) + '.json'

        # Start nfvbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        assert result["status"] == STATUS_OK

        # Extract useful metrics from the result:
        total_tx_rate = extract_value(result, "total_tx_rate")
        overall = extract_value(result, "overall")
        avg_delay_usec = extract_value(overall, "avg_delay_usec")

        context.logger.info(f"run_nfvbench_traffic: result #{i + 1}: "
                            f"total_tx_rate(pps)={total_tx_rate:,} "  # Add ',' thousands separator
                            f"avg_latency_usec={round(avg_delay_usec)}")

        # Keep only the result with the highest packet rate:
        if max_total_tx_rate is None or total_tx_rate > max_total_tx_rate:
            max_total_tx_rate = total_tx_rate
            context.result = result
            context.synthesis['total_tx_rate'] = total_tx_rate
            context.synthesis['avg_delay_usec'] = avg_delay_usec

    # Log the max result only when we did two or more nfvbench runs:
    if repeat > 1:
        context.logger.info(f"run_nfvbench_traffic: max result: "
                            f"total_tx_rate(pps)={context.synthesis['total_tx_rate']:,} "
                            f"avg_latency_usec={round(context.synthesis['avg_delay_usec'])}")


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
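    # rem: rates are keyed by "<frame size>_<flow count>" (e.g. context.rates['64_100k'],
    # hypothetical key) so that later percentage-rate scenarios can find this run's rate.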


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run.  It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold.  This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare the measured value to the reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))
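
# Example feature-file usage:
#   Then verify latency result is lower than 120 microseconds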


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the input rate value with the percentage one to avoid a no-match
        # when the pps value is not exactly the same as in the previous run:
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


"""Utility functions."""


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(nfvbench_test_url: str):
    try:
        r = requests.get(nfvbench_test_url)
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc
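
# rem: wait_result polls the NFVbench server status every 2 seconds, up to 1000 times
# (a little over half an hour), before giving up on a pending run.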


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)
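
# rem: worked example (hypothetical figures): with a stored previous rate of 2_000_000 pps,
# percentage_previous_rate(context, '70%') returns '1400000pps'.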


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be greater than 0%% and at most 100%%)'
                        % percentage)
    return float_percent / 100
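
# rem: e.g. convert_percentage_str_to_float('90%') == 0.9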


def compare_throughput_values(context, last_result, threshold):
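    # rem: with the default threshold of '90%', the new throughput must reach at least
    # 0.9 x the previous throughput to pass.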
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == STATUS_OK:
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == STATUS_OK:
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of the previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold,
                    old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in the reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")
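
    # rem: a latency "greater than 90% of the previous result" is interpreted as "no more
    # than 110% of the previous latency" (lower is better); hence the (2 - threshold)
    # factor below and the mirrored 200 - threshold value in the error message.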
    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "the previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in the reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
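    """Return True when the stored result inputs match the current scenario inputs."""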
    # Select required keys (other keys may be unset or inconsistent between scenarios):
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull the value of the specified key from a nested JSON structure."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in the JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]
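
# rem: extract_value returns the first occurrence found, e.g.
# extract_value({"overall": {"avg_delay_usec": 42.0}}, "avg_delay_usec") == 42.0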


def get_last_result(context, reference=None, page=None):
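    """Fetch from the test database the last result matching the current scenario.

    Walk the (paginated) results returned by the test database API and return the
    first stored result whose input values match the current scenario inputs, or
    None when no match is found.  When reference is True, look up the
    'characterization' case instead of the current case.
    """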
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # rem: pass reference through so recursive calls keep looking up the same case:
        return get_last_result(context, reference, page)
    return None