# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    # Keep the API base URL too, by stripping the trailing 'results' segment,
    # e.g. (hypothetical URL) 'http://db/api/v1/results' -> 'http://db/api/v1/'
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no cleanup')  # step text inferred from the 'no_cleanup' flag below
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate
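

# Illustrative feature-file usage of the Given steps above (all values are
# hypothetical, not taken from an actual scenario):
#
#   Given 64 frame size
#   And 100k flow count
#   And 90% rate of previous scenario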


"""When steps."""


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip="127.0.0.1", port=7555):
    context.host_ip = host_ip
    context.port = port
    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        # start nfvbench in server mode, passing host/port only when they
        # differ from the defaults
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(host_ip)
        if port != 7555:
            cmd.append("--port")
            cmd.append(str(port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


"""Then steps."""


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # str.strip('.json') would remove arbitrary leading/trailing
            # characters, so cut the extension explicitly before suffixing
            # the run index
            base = json_base_name[:-5] if json_base_name.endswith('.json') else json_base_name
            context.json['json'] = base + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    # keep the run that offered the highest rate
    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate")
        else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
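

# Illustrative feature-file usage of the two Then steps above (the repeat
# count is hypothetical):
#
#   Then 10 runs are started and waiting for maximum result
#   And extract offered rate result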


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run.  It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold.  This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))
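

# Illustrative feature-file usage of the step above (the threshold value is
# hypothetical):
#
#   Then verify latency result is lower than 1000 microseconds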


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    # the step captures reference_values as text; assume a JSON-style list
    # such as "[10, 200]" (usec) so the bounds can be compared numerically
    latency_comparison(context, reference_values=json.loads(reference_values))


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # override the input rate with the percentage string so that result
        # lookups still match even when the absolute pps value differs from
        # the previous run
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)
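

# Illustrative shape of the accumulated results (tag names and contents are
# hypothetical):
#
#   context.results = {
#       'throughput': [{'synthesis': {...}, 'input': {...}, 'output': {...}}],
#       'latency': [{'synthesis': {...}, 'input': {...}, 'output': {...}}],
#   }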


"""Utils steps."""


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Fail to access NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be above 0%% and at most 100%%)'
                        % percentage)
    return float_percent / 100
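

# Worked example for the helpers above (illustrative values):
# convert_percentage_str_to_float('70%') returns 0.7, so with a stored
# previous rate of 1_000_000 pps, percentage_previous_rate yields the rate
# string '700000pps'.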


def compare_throughput_values(context, last_result, threshold):
    # comparing runs with different statuses would be meaningless
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))
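

# Worked example for the threshold check above (numbers are illustrative):
# with threshold='90%' and a previous throughput of 1_000_000 pps, the current
# run passes only if it reaches at least 900_000 pps.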


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        # the threshold is mirrored for latency: with threshold='90%', the
        # current latency may be at most (2 - 0.9) = 110% of the old value
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
    """Check whether a database result matches the current run input values."""
    # Select required keys (other keys can be missing or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    # return the first value found for the key
    return results[0]
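

# Illustrative call: extract_value({"overall": {"avg_delay_usec": 25.0}},
# "avg_delay_usec") returns 25.0, the first value collected for that key in
# the nested result structure.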


def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # recurse into the next results page, keeping the reference flag in
        # place (it must not be overwritten by the page number)
        return get_last_result(context, reference, page)
    return None