# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip="127.0.0.1", port=7555):
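    """Make sure an NFVbench server is reachable at host_ip:port.

    A GET on /status probes for a live server first; only on connection
    failure is a new "nfvbench --server" process spawned in the background.
    test_nfvbench_api() then polls until the API answers.
    """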
    context.host_ip = host_ip
    context.port = port
    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if host_ip != "127.0.0.1":
            cmd += ["--host", host_ip]
        if port != 7555:
            cmd += ["--port", str(port)]
        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
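    """Start one or more NFVbench runs and wait for their results.

    Each run gets its own JSON result file (suffixed with the run index when
    several runs are requested), and the run with the highest total_tx_rate
    is kept as context.result.
    """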
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # suffix each result file with the run index; rsplit removes the
            # '.json' extension only (str.strip('.json') would also eat
            # leading/trailing characters of the file name itself)
            context.json['json'] = json_base_name.rsplit('.json', 1)[0] + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    # keep the run that achieved the highest total_tx_rate
    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y,
                                                                            "total_tx_rate") else y,
        results)
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
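    """Check the current throughput against the latest matching database result, if any."""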
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
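    """Check the current latency against the latest matching database result, if any."""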
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    # throughput_comparison() reads context.unit to pick pps vs bps handling
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    # reference_values is captured as a string such as "[10, 50]";
    # parse it so latency_comparison() gets numeric bounds
    latency_comparison(context, reference_values=json.loads(reference_values))


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
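    """Record the run result in context.results under the current test tag."""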
    if context.tag == "latency":
        # override the input rate value with the percentage string to avoid
        # a lookup mismatch when the absolute pps value differs between runs
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
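    """Poll the NFVbench API until it answers with no pending run (up to 24 tries, 5 s apart)."""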
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
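    """Poll the NFVbench API until the current run completes (status OK or ERROR), then return its result."""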
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


def percentage_previous_rate(context, rate):
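    """Compute an absolute pps rate from a percentage of the previously saved rate.

    The previous rate is looked up in context.rates by frame size and flow
    count; e.g. '70%' of a saved 10000000 pps rate yields '7000000pps'
    (illustrative values, not from any particular run).
    """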
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
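    """Convert a percentage string to a ratio, e.g.:

    >>> convert_percentage_str_to_float('90%')
    0.9
    """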
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be above 0%% and at most 100%%)' % percentage)
    return float_percent / 100


def compare_throughput_values(context, last_result, threshold):
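    """Check statuses match and, when both runs are OK, compare throughputs against the threshold."""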
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
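    """Check statuses match and, when both runs are OK, compare latencies against the threshold."""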
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
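    """Compare the current throughput either to a previous value (with a
    percentage threshold) or to an absolute [min, max] reference range,
    in pps or bps depending on context.unit.
    """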
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
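    """Compare the current latency to a previous value or to a reference range.

    The percentage threshold is inverted for latency: with a '90%' threshold
    the current latency must not exceed (2 - 0.9) = 110% of the old value,
    which is why the error message reports 200 - 90 = 110%.
    """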
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
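    """Return True when a stored result matches the current run inputs on all required keys."""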
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = {k: input_values[k] for k in required_keys if k in input_values}
    subset_result = {k: result[k] for k in required_keys if k in result}
    return subset_input == subset_result


def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    # only the first occurrence found is returned
    return results[0]


def get_last_result(context, reference=None, page=None):
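    """Fetch from the test database the latest result matching the current run inputs.

    Walks the paginated database results and returns the first matching
    tagged result, or None when no match is found. With reference=True the
    'characterization' case is queried instead of the current case.
    """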
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # keep the reference flag when recursing to the next page
        # (passing page positionally would shadow the reference parameter)
        return get_last_result(context, reference, page)
    return None