Compare the latency result with a fixed threshold of 1 ms (80/72780/2)
author     Gwenael Lambrouin <gwenael.lambrouin@orange.com>
           Mon, 28 Jun 2021 16:10:41 +0000 (18:10 +0200)
committer  Gwenael Lambrouin <gwenael.lambrouin@orange.com>
           Thu, 22 Jul 2021 14:37:55 +0000 (16:37 +0200)
Change-Id: I2b4ea4ee6e6442d4ceac268e7bf3c6bf9277ff54
Signed-off-by: Gwenael Lambrouin <gwenael.lambrouin@orange.com>
behave_tests/features/non-regression.feature
behave_tests/features/steps/steps.py

diff --git a/behave_tests/features/non-regression.feature b/behave_tests/features/non-regression.feature
index 89c3c4d..62daafa 100644
@@ -31,8 +31,7 @@ Feature: non-regression
       When NFVbench API is ready
       Then run is started and waiting for result
       And push result to database
-      And verify latency result is in same range as the previous result
-      And verify latency result is in same range as the characterization result
+      And verify latency result is lower than 1000 microseconds
 
      Examples: Frame sizes and throughput percentages
      | frame_size | throughput |
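
The change above replaces the two relative latency checks (against the previous run and against the characterization run) with a single absolute ceiling of 1000 microseconds. As a purely illustrative contrast between the two styles of check, here is a minimal standalone Python sketch; the function names and the 10% tolerance are hypothetical and are not taken from steps.py:

def within_relative_range(new_usec: float, reference_usec: float,
                          tolerance: float = 0.10) -> bool:
    # Old style (illustrative): pass if the new latency stays within a
    # tolerance of an earlier reference result.
    return new_usec <= reference_usec * (1.0 + tolerance)


def below_fixed_threshold(new_usec: float, max_usec: float = 1000.0) -> bool:
    # New style: pass if the new latency stays under a fixed 1 ms ceiling,
    # independently of any previous result.
    return new_usec <= max_usec


print(within_relative_range(950.0, 900.0))   # True: within 10% of the reference
print(below_fixed_threshold(950.0))          # True: under the 1000 usec ceiling
print(below_fixed_threshold(1200.0))         # False: above the ceiling
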
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 8798280..965b0c8 100644
@@ -226,6 +226,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run.  It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold.  This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
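
For completeness, here is a small hypothetical sketch of how the new step behaves, re-stating its comparison outside behave with a fake context object; SimpleNamespace and check_fixed_threshold are illustration-only names and are not part of this change:

from types import SimpleNamespace


def check_fixed_threshold(context, max_avg_latency_usec: float) -> None:
    # Same comparison as the new step: fail only when the measured average
    # latency is strictly greater than the threshold.
    new_avg_latency_usec = context.synthesis['avg_delay_usec']
    if new_avg_latency_usec > max_avg_latency_usec:
        raise AssertionError("Average latency higher than max threshold: "
                             "{avg} usec > {thr} usec".format(
                                 avg=round(new_avg_latency_usec),
                                 thr=round(max_avg_latency_usec)))


# 800 usec measured against the 1000 usec ceiling: the check passes silently.
ctx = SimpleNamespace(synthesis={'avg_delay_usec': 800.0})
check_fixed_threshold(ctx, 1000.0)

# 1200 usec measured against the same ceiling: the check raises AssertionError.
ctx.synthesis['avg_delay_usec'] = 1200.0
try:
    check_fixed_threshold(ctx, 1000.0)
except AssertionError as exc:
    print(exc)  # Average latency higher than max threshold: 1200 usec > 1000 usec
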