#!/usr/bin/env python
# Copyright 2021 Orange
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

from functools import reduce

from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry
import json
import requests
import subprocess
from subprocess import DEVNULL
from typing import Optional

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


"""Given steps."""


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


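# Illustrative (hypothetical) feature file snippet showing how the "Given" steps above are
# typically combined before a run; the real scenarios live in the behave_tests feature files:
#
#   Given NFVbench config from file: /etc/nfvbench/nfvbench.cfg
#   And 64 frame size
#   And 100k flow count
#   And ndr rate
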
"""When steps."""


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
    # The NFVbench server host IP and port number have been set up from environment variables
    # (see environment.py:before_all()).  Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    try:
        # check if API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


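# For reference, when a non-default host or port is requested, the step above spawns a command
# of the following shape (values here are hypothetical):
#
#   nfvbench -c <config file> --server --host 192.168.0.10 --port 7556
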
"""Then steps."""


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # Give each run its own output file by inserting the run index before the ".json"
            # extension (str.strip() removes characters from both ends, not a suffix).
            run_json = json_base_name
            if run_json.endswith('.json'):
                run_json = run_json[:-len('.json')]
            context.json['json'] = run_json + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        context.request_id = json.loads(r.text)["request_id"]
        assert r.status_code == 200
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate")
        else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


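# Illustrative default result file path built by the step above (tag and values are hypothetical):
#
#   /var/lib/xtesting/results/<CASE_NAME>/nfvbench-throughput-fs_64-fc_100k-rate_ndr.json
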
@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


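# The offered rate is keyed by "<frame size>_<flow count>" for later reuse by
# percentage_previous_rate(), e.g. (hypothetical values):
#
#   context.rates['64_100k'] = 14500000.0
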
@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run.  It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold.  This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Record the input rate as the percentage string rather than the computed pps value,
        # so that it still matches the previous run even if the pps value differs slightly.
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


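# After this step, context.results maps each test tag to a list of result entries; schematically:
#
#   context.results['throughput'] = [{'synthesis': {...}, 'input': {...}, 'output': {...}}]
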
"""Utility methods."""


@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access the NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
    return result


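# Both helpers above poll the NFVbench "/status" endpoint: test_nfvbench_api() waits for the
# idle reply ("error_message" equal to "no pending NFVbench run"), while wait_result() waits
# until a run reaches a terminal status ("OK" or "ERROR").
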
def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


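# Example (hypothetical values): with context.rates['64_100k'] == 10000000, a '70%' rate for the
# same frame size and flow count yields '7000000pps'.
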
def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be > 0%% and <= 100%%)' % percentage)
    return float_percent / 100


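# Examples: convert_percentage_str_to_float('90%') returns 0.9, while
# convert_percentage_str_to_float('150%') raises an exception (out of range).
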
def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
    # Select the required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


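# Schematic example: the comparison only looks at the required keys present on each side and
# ignores extra keys, e.g.:
#
#   get_result_from_input_values({'rate': 'ndr', 'duration_sec': '10', 'extra': 1},
#                                {'rate': 'ndr', 'duration_sec': '10'})   # -> True
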
def extract_value(obj, key):
    """Return the first value of the specified key found in nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]


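# Example: extract_value({'overall': {'avg_delay_usec': 35.2}}, 'avg_delay_usec') returns 35.2
# (the first occurrence found in the nested structure).
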
def get_last_result(context, reference=None, page=None):
    """Look for the most recent test result matching the current scenario inputs.

    Return the matching result found in the test results database, or None if no result
    matches.  When "reference" is set, search the results of the "characterization" case
    instead of the current case.
    """
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # Look into the next result page, keeping the same reference flag
        return get_last_result(context, reference, page)
    return None
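

# Illustrative query URL built by get_last_result() (project, installer and pod values are
# hypothetical):
#
#   <TEST_DB_URL>?project=nfvbench&case=characterization&installer=fuel&pod=pod1&criteria=PASS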