diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 8798280..965b0c8 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -226,6 +226,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run. It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold. This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
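
Usage note: the @then decorator registers the quoted phrase as a behave step, so a
feature file can invoke the new check directly. A minimal sketch of such a step line,
where the 150 microsecond threshold is an arbitrary example value, not one taken from
the nfvbench test suite:

    Then verify latency result is lower than 150 microseconds

The {max_avg_latency_usec:g} placeholder is parsed by behave's parse-based matcher as
a general-format number, so plain and scientific float notations should both match.
Note also that despite the "lower than" wording of the step, a measured average
latency exactly equal to the threshold passes: the step raises AssertionError only
for strictly greater values, as the docstring states.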