#!/usr/bin/env python
# Copyright 2021 Orange
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

import json
import subprocess
from functools import reduce
from subprocess import DEVNULL

import requests
from behave import given
from behave import then
from behave import when
from requests import RequestException
from retry import retry

from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str

STATUS_ERROR = "ERROR"

STATUS_OK = "OK"


"""Given steps."""


@given('PROJECT_NAME: {project_name}')
def override_xtesting_project_name(context, project_name):
    context.data['PROJECT_NAME'] = project_name


@given('TEST_DB_EXT_URL: {test_db_ext_url}')
def override_xtesting_test_db_ext_url(context, test_db_ext_url):
    context.data['TEST_DB_EXT_URL'] = test_db_ext_url


@given('TEST_DB_URL: {test_db_url}')
def override_xtesting_test_db_url(context, test_db_url):
    context.data['TEST_DB_URL'] = test_db_url
    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')
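    # Example (hypothetical URL): a TEST_DB_URL of
    # 'http://testdb.example.com/api/v1/results' yields a BASE_TEST_DB_URL of
    # 'http://testdb.example.com/api/v1/'.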


@given('INSTALLER_TYPE: {installer_type}')
def override_xtesting_installer_type(context, installer_type):
    context.data['INSTALLER_TYPE'] = installer_type


@given('DEPLOY_SCENARIO: {deploy_scenario}')
def override_xtesting_deploy_scenario(context, deploy_scenario):
    context.data['DEPLOY_SCENARIO'] = deploy_scenario


@given('NODE_NAME: {node_name}')
def override_xtesting_node_name(context, node_name):
    context.data['NODE_NAME'] = node_name


@given('BUILD_TAG: {build_tag}')
def override_xtesting_build_tag(context, build_tag):
    context.data['BUILD_TAG'] = build_tag


@given('NFVbench config from file: {config_path}')
def init_config(context, config_path):
    context.data['config'] = config_path


@given('a JSON NFVbench config')
def init_config_from_json(context):
    context.json.update(json.loads(context.text))


@given('log file: {log_file_path}')
def log_config(context, log_file_path):
    context.json['log_file'] = log_file_path


@given('json file: {json_file_path}')
def json_config(context, json_file_path):
    context.json['json'] = json_file_path


@given('no clean up')
def add_no_clean_up_flag(context):
    context.json['no_cleanup'] = 'true'


@given('TRex is restarted')
def add_restart(context):
    context.json['restart'] = 'true'


@given('{label} label')
def add_label(context, label):
    context.json['label'] = label


@given('{frame_size} frame size')
def add_frame_size(context, frame_size):
    context.json['frame_sizes'] = [frame_size]


@given('{flow_count} flow count')
def add_flow_count(context, flow_count):
    context.json['flow_count'] = flow_count


@given('{rate} rate')
def add_rate(context, rate):
    context.json['rate'] = rate


@given('{duration} sec run duration')
def add_duration(context, duration):
    context.json['duration_sec'] = duration


@given('{percentage_rate} rate of previous scenario')
def add_percentage_rate(context, percentage_rate):
    context.percentage_rate = percentage_rate
    rate = percentage_previous_rate(context, percentage_rate)
    context.json['rate'] = rate
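    # Example (hypothetical values): with a previously stored rate of
    # 1000000 pps for the same frame size and flow count, the step
    # "Given 70% rate of previous scenario" sets context.json['rate']
    # to "700000pps".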


"""When steps."""


@when('NFVbench API is ready')
@when('NFVbench API is ready on host {host_ip}')
@when('NFVbench API is ready on host {host_ip} and port {port:d}')
def start_server(context, host_ip="127.0.0.1", port=7555):
    context.host_ip = host_ip
    context.port = port
    try:
        # check if the API is already available
        requests.get(
            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
    except RequestException:
        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
        if host_ip != "127.0.0.1":
            cmd.append("--host")
            cmd.append(host_ip)
        if port != 7555:
            cmd.append("--port")
            # port is parsed as an int ({port:d}), but subprocess arguments
            # must be strings
            cmd.append(str(port))

        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)

    test_nfvbench_api(context)
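    # The spawned process is equivalent to running, for example (hypothetical
    # config path and address):
    #     nfvbench -c /etc/nfvbench/nfvbench.cfg --server --host 10.0.0.1 --port 8080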


"""Then steps."""


@then('run is started and waiting for result')
@then('{repeat:d} runs are started and waiting for maximum result')
def step_impl(context, repeat=1):
    results = []
    if 'json' not in context.json:
        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
                               '/nfvbench-' + context.tag + '-fs_' + \
                               context.json['frame_sizes'][0] + '-fc_' + \
                               context.json['flow_count'] + '-rate_' + \
                               context.json['rate'] + '.json'
    json_base_name = context.json['json']
    for i in range(repeat):
        if repeat > 1:
            # derive a per-iteration result file name from the base name
            context.json['json'] = json_base_name.rsplit('.json', 1)[0] + '-' + str(i) + '.json'

        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
        payload = json.dumps(context.json)
        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
        assert r.status_code == 200
        context.request_id = json.loads(r.text)["request_id"]
        result = wait_result(context)
        results.append(result)
        assert result["status"] == STATUS_OK

    # keep the run with the highest total_tx_rate
    context.result = reduce(
        lambda x, y: x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate")
        else y,
        results)

    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
    overall = extract_value(context.result, "overall")
    avg_delay_usec = extract_value(overall, "avg_delay_usec")
    # create a synthesis with offered pps and latency values
    context.synthesis['total_tx_rate'] = total_tx_rate
    context.synthesis['avg_delay_usec'] = avg_delay_usec
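    # Example of a generated result file path (hypothetical values):
    #     /var/lib/xtesting/results/characterization/nfvbench-throughput-fs_64-fc_128-rate_ndr.json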


@then('extract offered rate result')
def save_rate_result(context):
    total_tx_rate = extract_value(context.result, "total_tx_rate")
    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate


@then('verify throughput result is in same range as the previous result')
@then('verify throughput result is greater than {threshold} of the previous result')
def get_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the previous result')
@then('verify latency result is greater than {threshold} of the previous result')
def get_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context)

    if last_result:
        compare_latency_values(context, last_result, threshold)


@then(
    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
def compare_throughput_pps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'pps'
    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
    throughput_comparison(context, reference_values=reference_values)


@then(
    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
def compare_throughput_bps_result_with_range_values(context, min_reference_value,
                                                    max_reference_value):
    context.unit = 'bps'
    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
    throughput_comparison(context, reference_values=reference_values)


@then('verify result is in {reference_values} range for latency')
def compare_result_with_range_values(context, reference_values):
    # reference_values is assumed to be a JSON-style list of microsecond
    # bounds, e.g. "[10, 20]"; parse it so that latency_comparison receives
    # numbers rather than a raw string
    latency_comparison(context, reference_values=json.loads(reference_values))


@then('verify throughput result is in same range as the characterization result')
@then('verify throughput result is greater than {threshold} of the characterization result')
def get_characterization_throughput_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_throughput_values(context, last_result, threshold)


@then('verify latency result is in same range as the characterization result')
@then('verify latency result is greater than {threshold} of the characterization result')
def get_characterization_latency_result_from_database(context, threshold='90%'):
    last_result = get_last_result(context, True)
    if not last_result:
        raise AssertionError("No characterization result found.")
    compare_latency_values(context, last_result, threshold)


@then('push result to database')
def push_result_database(context):
    if context.tag == "latency":
        # Override the rate value with the percentage string so that the
        # lookup of previous results still matches when the absolute pps
        # value differs slightly between runs.
        context.json["rate"] = context.percentage_rate
    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}

    if context.tag not in context.results:
        context.results[context.tag] = [json_result]
    else:
        context.results[context.tag].append(json_result)


"""Utils methods."""


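# With tries=24 and delay=5.0, the readiness check below retries for up to
# about two minutes before giving up.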
@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
        assert r.status_code == 200
        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
    except RequestException as exc:
        raise AssertionError("Failed to access the NFVbench API") from exc


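# With tries=1000 and delay=2.0, the polling loop below waits for a pending
# run for up to roughly 33 minutes before timing out.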
@retry(AssertionError, tries=1000, delay=2.0, logger=None)
def wait_result(context):
    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
    context.raw_result = r.text
    result = json.loads(context.raw_result)
    assert r.status_code == 200
    assert result["status"] in (STATUS_OK, STATUS_ERROR)
    return result


def percentage_previous_rate(context, rate):
    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]

    if rate.endswith('%'):
        rate_percent = convert_percentage_str_to_float(rate)
        return str(int(previous_rate * rate_percent)) + 'pps'
    raise Exception('Unknown rate string format %s' % rate)
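    # Example: with a stored previous rate of 1000000 pps,
    # percentage_previous_rate(context, '70%') returns '700000pps'.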


def convert_percentage_str_to_float(percentage):
    float_percent = float(percentage.replace('%', '').strip())
    if float_percent <= 0 or float_percent > 100.0:
        raise Exception(
            '%s is out of valid range (must be greater than 0%% and at most 100%%)' % percentage)
    return float_percent / 100
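    # Example: convert_percentage_str_to_float('90%') returns 0.9.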


def compare_throughput_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_throughput = extract_value(last_result["output"], "total_tx_rate")
        throughput_comparison(context, old_throughput, threshold=threshold)


def compare_latency_values(context, last_result, threshold):
    assert last_result["output"]["status"] == context.result["status"]
    if last_result["output"]["status"] == "OK":
        old_latency = extract_value(extract_value(last_result["output"], "overall"),
                                    "avg_delay_usec")
        latency_comparison(context, old_latency, threshold=threshold)


def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
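    # Example (hypothetical values): reference_values=['800000pps', '1200000pps']
    # is parsed to [800000, 1200000] and the current throughput must fall
    # within that closed interval.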
    current_throughput_pps = extract_value(context.result, "total_tx_rate")

    if old_throughput_pps:
        if not current_throughput_pps >= convert_percentage_str_to_float(
                threshold) * old_throughput_pps:
            raise AssertionError(
                "Current run throughput {current_throughput_pps} is not over {threshold} "
                "of previous value ({old_throughput_pps})".format(
                    current_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(current_throughput_pps)),
                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
                        Formatter.standard(old_throughput_pps))))
    elif reference_values:
        if context.unit == 'bps':
            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
            formatted_current_throughput = Formatter.bits(current_throughput)
            formatted_min_reference_value = Formatter.bits(reference_values[0])
            formatted_max_reference_value = Formatter.bits(reference_values[1])
        else:
            current_throughput = current_throughput_pps
            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
            formatted_current_throughput = Formatter.suffix('pps')(
                Formatter.standard(current_throughput))
            formatted_min_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[0]))
            formatted_max_reference_value = Formatter.suffix('pps')(
                Formatter.standard(reference_values[1]))
        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
            raise AssertionError(
                "Current run throughput {current_throughput} is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_throughput=formatted_current_throughput,
                    min_reference_value=formatted_min_reference_value,
                    max_reference_value=formatted_max_reference_value))


def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
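    # With threshold='90%', the current latency must not exceed
    # (2 - 0.9) = 1.1 times the previous value, i.e. stay within 110% of it.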
    overall = extract_value(context.result, "overall")
    current_latency = extract_value(overall, "avg_delay_usec")

    if old_latency:
        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
            threshold = str(200 - int(threshold.strip('%'))) + '%'
            raise AssertionError(
                "Current run latency {current_latency}usec is not less than {threshold} of "
                "previous value ({old_latency}usec)".format(
                    current_latency=Formatter.standard(current_latency), threshold=threshold,
                    old_latency=Formatter.standard(old_latency)))
    elif reference_values:
        if not reference_values[0] <= current_latency <= reference_values[1]:
            raise AssertionError(
                "Current run latency {current_latency}usec is not in reference values "
                "[{min_reference_value}, {max_reference_value}]".format(
                    current_latency=Formatter.standard(current_latency),
                    min_reference_value=Formatter.standard(reference_values[0]),
                    max_reference_value=Formatter.standard(reference_values[1])))


def get_result_from_input_values(input_values, result):
    # Select required keys (other keys may be unset or inconsistent between scenarios)
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
    if 'user_label' in result:
        required_keys.append('user_label')
    if 'flavor_type' in result:
        required_keys.append('flavor_type')
    subset_input = dict((k, input_values[k]) for k in required_keys if k in input_values)
    subset_result = dict((k, result[k]) for k in required_keys if k in result)
    return subset_input == subset_result
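    # Example (hypothetical values): an input of
    # {'duration_sec': '10', 'frame_sizes': ['64'], 'flow_count': '128', 'rate': 'ndr'}
    # matches a stored result only if every required key present on both
    # sides carries the same value.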


def extract_value(obj, key):
    """Pull all values of specified key from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == key:
                    arr.append(v)
                elif isinstance(v, (dict, list)):
                    extract(v, arr, key)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    results = extract(obj, arr, key)
    return results[0]
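    # Example: extract_value({'overall': {'avg_delay_usec': 30}}, 'avg_delay_usec')
    # returns 30, i.e. the first match found in the nested structure.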


def get_last_result(context, reference=None, page=None):
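    # Example of a constructed query URL (hypothetical values):
    #     http://testdb.example.com/api/v1/results?project=nfvbench
    #         &case=characterization&installer=fuel&pod=pod1&criteria=PASS&page=2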
    if reference:
        case_name = 'characterization'
    else:
        case_name = context.CASE_NAME
    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
        project_name=context.data['PROJECT_NAME'], case_name=case_name)
    if context.data['INSTALLER_TYPE']:
        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
    if context.data['NODE_NAME']:
        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
    url += '&criteria=PASS'
    if page:
        url += '&page={page}'.format(page=page)
    last_results = requests.get(url)
    assert last_results.status_code == 200
    last_results = json.loads(last_results.text)
    for result in last_results["results"]:
        for tagged_result in result["details"]["results"][context.tag]:
            if get_result_from_input_values(tagged_result["input"], context.json):
                return tagged_result
    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
        page = last_results["pagination"]["current_page"] + 1
        # propagate the reference flag when fetching the next page
        return get_last_result(context, reference, page)
    return None