# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
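    """Set the traffic rate to a percentage of a previously measured rate.

    The reference is the offered rate saved by a previous scenario for the same
    frame size and flow count (see percentage_previous_rate()).
    """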
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
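    """Make sure the NFVbench server is running and answering its REST API.

    Host IP and port default to the values set up in environment.py and can be
    overridden by the step parameters. If the status endpoint does not answer,
    a local server is started with "nfvbench --server".
    """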
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()). Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    try:
        # check if the API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))
        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
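    """Start one or several NFVbench runs and wait for their results.

    When repeat > 1, each run writes its own JSON result file and the run with
    the highest total_tx_rate is kept as context.result.
    """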
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # str.strip('.json') would also eat matching characters from the
            # stem; slice off the extension instead to build per-run names.
            context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'
        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        # Check the status code before parsing the body, so that a failed POST
        # surfaces as an assertion error rather than a JSON decoding error:
        assert r.status_code == 200
        context.request_id = json.loads(r.text)["request_id"]
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    # keep the run with the highest total_tx_rate
    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate")
        else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


@then('extract offered rate result')
def save_rate_result(context):
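    """Save the offered rate for the current frame size and flow count.

    Later scenarios may reuse it as a rate reference (see percentage_previous_rate()).
    """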
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run. It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold. This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare the measured value to the reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
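    """Record the current run results under context.results[context.tag]."""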
    if context.tag == "latency":
        # Override the input rate value with the percentage one to avoid a
        # mismatch when the pps value differs slightly from the previous run.
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
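    """Check that the NFVbench API is available and idle.

    Retried by the decorator: up to 24 attempts, 5 seconds apart.
    """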
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
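    """Poll the NFVbench API until the current run ends, then return its result.

    Retried by the decorator every 2 seconds as long as the reported status is
    neither OK nor ERROR.
    """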
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    assert r.status_code == 200
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
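    """Build a pps rate string from a percentage of the previously saved rate.

    Example: with a saved rate of 1000000 pps, '90%' yields '900000pps'.
    """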
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
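    """Convert a percentage string to a fraction, e.g. '90%' -> 0.9."""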
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be > 0%% and <= 100%%)' % percentage)
    return float_percent / 100


def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
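    """Compare the measured throughput to a previous value or a reference range.

    With old_throughput_pps, assert that the current rate reaches at least
    threshold (e.g. '90%') of the old one. With reference_values, assert that
    the current rate lies within [min, max], in pps or bps depending on
    context.unit.
    """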
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold,
                    old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
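    """Compare the measured average latency to a previous value or a range."""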
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        # A threshold of '90%' tolerates a latency increase of up to 10%:
        # the current latency must be <= 110% (= 200% - 90%) of the old one.
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull the first value found for the specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]


def get_last_result(context, reference=None, page=None):
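    """Fetch the last matching result from the Xtesting test database.

    Walk the paginated results for the current project and case (or for the
    'characterization' case when reference is set) and return the first entry
    whose input values match the current scenario, or None if there is none.
    """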
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # Pass the reference flag through when fetching the next page:
        return get_last_result(context, reference, page)
    return None