behave_tests: log latency test (fixed threshold)
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 76ed12d..a1d29ce 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -127,6 +127,7 @@ def add_percentage_rate(context, percentage_rate):
     context.percentage_rate = percentage_rate
     rate = percentage_previous_rate(context, percentage_rate)
     context.json['rate'] = rate
+    context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")


 """When steps."""
@@ -266,12 +267,18 @@ def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec:
     # Get the just measured average latency (a float):
     new_avg_latency_usec = context.synthesis['avg_delay_usec']

+    # Log what we test:
+    context.logger.info("check_latency_result_against_fixed_threshold(usec): "
+                        "{value}<={ref}?".format(
+                            value=round(new_avg_latency_usec),
+                            ref=round(max_avg_latency_usec)))
+
     # Compare measured value to reference:
     if new_avg_latency_usec > max_avg_latency_usec:
         raise AssertionError("Average latency higher than max threshold: "
-                             "{avg_latency} usec > {threshold} usec".format(
-                                 avg_latency=round(new_avg_latency_usec),
-                                 threshold=round(max_avg_latency_usec)))
+                             "{value} usec > {ref} usec".format(
+                                 value=round(new_avg_latency_usec),
+                                 ref=round(max_avg_latency_usec)))


 @then(
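
The pattern this commit introduces is to log the measured value and its reference before (or instead of only inside) the assertion, so that both passing and failing runs leave the latency figures in the behave log. The sketch below is a minimal standalone illustration of that pattern, not nfvbench code: the function name check_latency_against_threshold and the logging setup are assumptions made for the example; in the real step implementation the logger is reached through the behave context object (context.logger).

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("behave_tests")


    def check_latency_against_threshold(new_avg_latency_usec: float,
                                        max_avg_latency_usec: float) -> None:
        # Log what we test before asserting, so the value/threshold pair is
        # recorded even when the check passes (mirrors the added info() call):
        logger.info("check_latency_result_against_fixed_threshold(usec): "
                    "{value}<={ref}?".format(value=round(new_avg_latency_usec),
                                             ref=round(max_avg_latency_usec)))
        # Compare measured value to reference:
        if new_avg_latency_usec > max_avg_latency_usec:
            raise AssertionError("Average latency higher than max threshold: "
                                 "{value} usec > {ref} usec".format(
                                     value=round(new_avg_latency_usec),
                                     ref=round(max_avg_latency_usec)))


    check_latency_against_threshold(42.7, 120.0)   # logs "43<=120?" and returns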