# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

import json
import requests
import subprocess
from subprocess import DEVNULL

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


"""Given steps."""
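

# The @given steps below let a feature file override the Xtesting context
# (project name, result DB URLs, installer, scenario, node name, build tag).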
@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


"""When steps."""
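

# Reuse an NFVbench API server that already answers on /status; otherwise
# spawn "nfvbench --server" in the background and poll until it is ready.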
@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip="127.0.0.1", port=7555):
    context.host_ip = host_ip
    context.port = port
    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(host_ip)
        if port != 7555:
            cmd.append("--port")
            cmd.append(str(port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


"""Then steps."""
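

# POST one run (or {repeat:d} runs) to /start_run, wait for each result and
# keep the run with the highest total_tx_rate as context.result.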
@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # drop the '.json' suffix (str.strip would eat any of the
            # characters '.json' at both ends) and add a per-run index
            context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y,
                                                                            "total_tx_rate") else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
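

# The steps below fetch the most recent matching result from the test DB
# (see get_last_result) and compare the current run against it.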
@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)
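

# Same checks against the reference 'characterization' case; a missing
# characterization result raises an AssertionError instead of being skipped.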
@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # override the input rate with its percentage form so that the DB
        # lookup still matches even if the absolute pps value drifts between runs
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


"""Utility functions."""
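

# Poll the NFVbench API status endpoint: @retry converts each failed attempt
# into a new try (up to 10 tries, 5 s apart, i.e. roughly 50 s in total).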
@retry(AssertionError, tries=10, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result
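

# Rates measured by previous scenarios are stored in context.rates, keyed by
# '<frame size>_<flow count>'; derive the new rate as a percentage of that value.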
def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be within (0%%, 100%%])' % percentage)
    return float_percent / 100


def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)
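

# throughput_comparison() supports two modes: compare against a previous run
# with a percentage threshold, or against an absolute [min, max] reference
# range given in pps or bps (selected by context.unit).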
def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps}pps)".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))
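

# A latency threshold of X% is read as "no worse than (200 - X)%": e.g. a 90%
# threshold tolerates latency up to 1.1 times the previous value, which is the
# (2 - X/100) factor below.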
def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input[k]) for k in required_keys if k in input)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result
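

# Note: despite its docstring, extract_value() returns only the first value
# found for the key, searching the nested dict/list structure depth-first.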
def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]
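

# Query the test DB REST API for the most recent PASS result whose input
# matches the current scenario, following pagination until a match is found;
# returns None when no page contains a matching result.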
def get_last_result(context, reference=None, page=None):
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # propagate 'reference' so pagination keeps searching in the same case
        return get_last_result(context, reference, page)
    return None