# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"

@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag

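
# The steps above override the Xtesting environment values for one scenario.
# As an illustration (hypothetical values), a feature file could read:
#   Given PROJECT_NAME: nfvbench
#   And TEST_DB_URL: http://testdb.example.com/api/v1/results
#   And NODE_NAME: pod12
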
@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path

@given('no cleanup')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'

@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration

@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate

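
# Example (hypothetical scenario): once a characterization run has stored its
# offered rate via the 'extract offered rate result' step, a latency scenario
# can state
#   Given 70% rate of previous scenario
# and percentage_previous_rate() (defined below) converts that into a fixed
# pps rate such as '700000pps'.
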
@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()).  Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    # Check whether the API is already available:
    try:
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))
        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)

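
# For illustration (hypothetical values): with config file
# '/etc/nfvbench/nfvbench.cfg', host '192.168.0.10' and port 8080, the command
# launched above would be:
#   nfvbench -c /etc/nfvbench/nfvbench.cfg --server --host 192.168.0.10 --port 8080
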
@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # Suffix the result file name with the run index.  Slicing off the
            # '.json' extension avoids the pitfall of str.strip('.json'), which
            # strips a set of characters, not a suffix:
            context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'

        # Start NFVbench traffic and wait for the result:
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        assert r.status_code == 200
        context.request_id = json.loads(r.text)["request_id"]
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    # Keep only the result with the highest total TX rate:
    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y,
                                                                            "total_tx_rate") else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # Create a synthesis with offered pps and latency values:
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec

@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate

@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)

@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run.  It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold.  This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))

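
# Illustration (hypothetical numbers): with a measured average latency of
# 120.4 usec, 'Then verify latency result is lower than 150 microseconds'
# passes, while a 100 usec threshold raises:
#   AssertionError: Average latency higher than max threshold: 120 usec > 100 usec
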
@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)

@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)

@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)

@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the input rate value with the percentage one, so that the
        # lookup still matches if the pps value differs from the previous run:
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)

@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc

@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result

def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)

def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be greater than 0%% and at most 100%%)'
                        % percentage)
    return float_percent / 100

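
# For instance, convert_percentage_str_to_float('70%') returns 0.7, so with a
# previous rate of 1000000 pps (hypothetical value), percentage_previous_rate()
# returns '700000pps'.
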
def compare_throughput_values(context, last_result, threshold):
    # Check that the current and previous runs completed with the same status:
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)

def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))

def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        # A '90%' threshold tolerates at most a 10% latency increase: the
        # current latency must not exceed (2 - 0.9) = 110% of the old value.
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))

def get_result_from_input_values(input, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios):
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input[k]) for k in required_keys if k in input)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result

def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]

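
# For instance (hypothetical data):
#   extract_value({"overall": {"avg_delay_usec": 35.2}}, "avg_delay_usec")
# returns 35.2, the first value found for that key in the nested structure.
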
def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # Pass 'page' as the third argument: a positional call with only two
        # arguments would bind it to 'reference' and query the wrong test case.
        return get_last_result(context, reference, page)
    return None
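
# Example (hypothetical values): with TEST_DB_URL 'http://testdb.example.com/api/v1/results',
# PROJECT_NAME 'nfvbench', INSTALLER_TYPE 'fuel' and NODE_NAME 'pod12', a
# characterization lookup queries:
#   http://testdb.example.com/api/v1/results?project=nfvbench&case=characterization&installer=fuel&pod=pod12&criteria=PASS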