behave_tests: configure nfvbench ip/port with env vars
[nfvbench.git] behave_tests/features/steps/steps.py
#!/usr/bin/env python
# Copyright 2021 Orange
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

import json
import subprocess
from functools import reduce
from subprocess import DEVNULL
from typing import Optional

import requests
from behave import given
from behave import when
from behave import then
from requests import RequestException
from retry import retry

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


"""Given steps."""


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate


"""When steps."""


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip: Optional[str] = None, port: Optional[int] = None):
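    """Start the NFVbench server if it is not already running and wait for its API."""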
    # NFVbench server host IP and port number have been set up from environment variables (see
    # environment.py:before_all()).  Here we allow overriding them from feature files:
    if host_ip is not None:
        context.host_ip = host_ip
    if port is not None:
        context.port = port

    try:
        # check if the API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if context.host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(context.host_ip)
        if context.port != 7555:
            cmd.append("--port")
            cmd.append(str(context.port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)


171 """Then steps."""
172
173
@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
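    """Start one or several NFVbench runs and wait for the results.

    When several runs are requested, keep the result with the highest total_tx_rate
    as the scenario result.
    """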
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # str.strip('.json') would strip characters, not the suffix, so cut it explicitly:
            if json_base_name.endswith('.json'):
                json_base_name = json_base_name[:-len('.json')]
            context.json['json'] = json_base_name + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        assert r.status_code == 200
        context.request_id = json.loads(r.text)["request_id"]
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate")
        else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec


@then('extract offered rate result')
def save_rate_result(context):
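    """Save the measured offered rate, keyed by frame size and flow count."""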
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
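    """Compare the throughput result with the previous result from the test database, if any."""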
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
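    """Compare the latency result with the previous result from the test database, if any."""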
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
    """Check latency result against a fixed threshold.

    Check that the average latency measured during the current scenario run is
    lower than or equal to the provided fixed reference value.

    Args:
        context: The context data of the current scenario run.  It includes the
            test results for that run.

        max_avg_latency_usec: Reference value to be used as a threshold.  This
            is a maximum average latency expressed in microseconds.

    Raises:
        AssertionError: The latency result is strictly greater than the reference value.

    """
    # Get the just measured average latency (a float):
    new_avg_latency_usec = context.synthesis['avg_delay_usec']

    # Compare measured value to reference:
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg_latency} usec > {threshold} usec".format(
                                 avg_latency=round(new_avg_latency_usec),
                                 threshold=round(max_avg_latency_usec)))


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
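    """Check that the measured throughput in pps lies within the given reference range."""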
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
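    """Check that the measured throughput in bps lies within the given reference range."""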
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
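    """Check that the measured average latency lies within the given reference range."""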
    latency_comparison(context, reference_values=reference_values)


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
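    """Compare the throughput result with the reference characterization result."""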
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
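    """Compare the latency result with the reference characterization result."""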
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
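    """Record the run synthesis, input and output under the current tag in context.results."""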
    if context.tag == "latency":
        # Override the input rate value with the percentage string: the measured pps rate
        # may differ slightly between runs and would prevent matching previous results.
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


319 """Utils methods."""
320
321
@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
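    """Poll the NFVbench API until it reports that no run is pending."""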
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access the NFVbench API") from exc


@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
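    """Poll the NFVbench API until the current run completes, then return the result."""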
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    assert r.status_code == 200
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert result["status"] in (STATUS_OK, STATUS_ERROR)
    return result


def percentage_previous_rate(context, rate):
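    """Return a rate in pps computed as a percentage of the previously measured offered rate."""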
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)


def convert_percentage_str_to_float(percentage):
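    """Convert a percentage string such as '90%' to a float ratio in the ]0, 1] range."""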
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception('%s is out of valid range (must be greater than 0%% and at most 100%%)'
                        % percentage)
    return float_percent / 100


def compare_throughput_values(context, last_result, threshold):
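    """Check the current throughput against a previous result within the given threshold."""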
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
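    """Check the current latency against a previous result within the given threshold."""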
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
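    """Compare the current throughput either to a previous value (given a threshold)
    or to a [min, max] range of reference values.
    """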
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
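    """Compare the current average latency either to a previous value (given a threshold)
    or to a [min, max] range of reference values.
    """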
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
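    """Return True if the result entry matches the input values on the required keys."""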
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result


def extract_value(obj, key):
    """Return the first value of the specified key found in nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]


def get_last_result(context, reference=None, page=None):
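    """Fetch from the test database the latest result matching the current input values.

    Use the 'characterization' case when reference is set, otherwise the current case name.
    Follow the result pages until a match is found; return None when there is no match.
    """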
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # pass reference through so that paging keeps querying the same case
        return get_last_result(context, reference, page)
    return None