NFVBENCH-205 - Add behave tests for characterization and non-regression 54/72154/5
author fmenguy <francoisregis.menguy@orange.com>
Thu, 11 Mar 2021 19:33:36 +0000 (20:33 +0100)
committer fmenguy <francoisregis.menguy@orange.com>
Tue, 27 Apr 2021 14:22:39 +0000 (16:22 +0200)
Change-Id: I708eee21a9fd11e7a276707fb0b43d8598381ce7
Signed-off-by: fmenguy <francoisregis.menguy@orange.com>
20 files changed:
behave_tests/__init__.py [new file with mode: 0644]
behave_tests/behavedriver.py [new file with mode: 0644]
behave_tests/features/__init__.py [new file with mode: 0644]
behave_tests/features/characterization-full.feature [new file with mode: 0644]
behave_tests/features/characterization-samples.feature [new file with mode: 0644]
behave_tests/features/environment.py [new file with mode: 0644]
behave_tests/features/non-regression.feature [new file with mode: 0644]
behave_tests/features/steps/__init__.py [new file with mode: 0644]
behave_tests/features/steps/steps.py [new file with mode: 0644]
docker/Dockerfile
docker/nfvbench-entrypoint.sh
nfvbench/nfvbench.py
nfvbench/utils.py
requirements.txt
setup.cfg
test/mock_trex.py
xtesting/ansible/host_vars/127.0.0.1
xtesting/ansible/site.yml
xtesting/behaveframework.py [new file with mode: 0644]
xtesting/testcases.yaml

diff --git a/behave_tests/__init__.py b/behave_tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/behave_tests/behavedriver.py b/behave_tests/behavedriver.py
new file mode 100644 (file)
index 0000000..ad0c8b7
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Define classes required to run any Behave test suites."""
+
+from __future__ import division
+
+import json
+import logging
+import os
+
+from xtesting.core.behaveframework import BehaveFramework
+
+__author__ = "François-Régis Menguy <francoisregis.menguy@orange.com>"
+
+
+class BehaveDriver(BehaveFramework):
+    """NFVbench custom BehaveDriver for Xtesting."""
+    # pylint: disable=too-many-instance-attributes
+
+    __logger = logging.getLogger('xtesting.core.behavedriver')
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.campaign_json_file = os.path.join(self.res_dir, 'campaign_result.json')
+
+    def extract_nfvbench_results(self):
+        with open(self.campaign_json_file) as stream_:
+            self.details['results'] = json.load(stream_)
+
+    def run(self, **kwargs):
+
+        """Override existing Xtesting BehaveFramework core script run method
+         to extract NFVbench result and push them to DB
+
+        Here are the steps:
+           * run Xtesting behave method:
+            * create the output directories if required,
+            * run behave features with parameters
+            * get the behave results in output.json,
+            * get the nfvbench results in campaign_result.json
+
+        Args:
+            kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            EX_OK if all suites ran well.
+            EX_RUN_ERROR otherwise.
+        """
+        try:
+            super().run(**kwargs)
+            self.extract_nfvbench_results()
+            self.__logger.info("NFVbench results were successfully parsed")
+        except Exception:  # pylint: disable=broad-except
+            self.__logger.exception("Cannot parse NFVbench results")
+            return self.EX_RUN_ERROR
+        return self.EX_OK
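
Once registered as the nfvbench_behaveframework entry point (see setup.cfg below), the driver can also be exercised on its own; a minimal sketch, assuming the usual xtesting.core.testcase.TestCase keyword arguments and illustrative case/suite values:

    # Minimal sketch, not part of this change (case and suite values are examples only)
    from behave_tests.behavedriver import BehaveDriver

    driver = BehaveDriver(case_name='characterization', project_name='nfvbench')
    status = driver.run(
        suites=['behave_tests/features/characterization-samples.feature'],
        tags=['characterization'],
        console=True)
    # on success, driver.details['results'] holds the parsed campaign_result.json
    assert status == driver.EX_OK
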
diff --git a/behave_tests/features/__init__.py b/behave_tests/features/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/behave_tests/features/characterization-full.feature b/behave_tests/features/characterization-full.feature
new file mode 100644 (file)
index 0000000..f8bca5f
--- /dev/null
@@ -0,0 +1,70 @@
+@characterization
+Feature: characterization
+
+  @throughput
+  Scenario Outline: Run an NDR test for a defined frame size and flow count
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And <flow_count> flow count
+      And ndr rate
+      When NFVbench API is ready
+      Then 3 runs are started and waiting for maximum result
+      And push result to database
+      And extract offered rate result
+
+     Examples: Frame sizes and flow counts
+      | frame_size | flow_count |
+      | 64         | 128        |
+      | 128        | 128        |
+      | 256        | 128        |
+      | 512        | 128        |
+      | 768        | 128        |
+      | 1024       | 128        |
+      | 1280       | 128        |
+      | 1518       | 128        |
+      | IMIX       | 128        |
+      | 9000       | 128        |
+      | 64         | 10k        |
+      | 128        | 10k        |
+      | 256        | 10k        |
+      | 512        | 10k        |
+      | 768        | 10k        |
+      | 1024       | 10k        |
+      | 1280       | 10k        |
+      | 1518       | 10k        |
+      | IMIX       | 10k        |
+      | 9000       | 10k        |
+      | 64         | 100k       |
+      | 128        | 100k       |
+      | 256        | 100k       |
+      | 512        | 100k       |
+      | 768        | 100k       |
+      | 1024       | 100k       |
+      | 1280       | 100k       |
+      | 1518       | 100k       |
+      | IMIX       | 100k       |
+      | 9000       | 100k       |
+
+
+  @latency
+  Scenario Outline: Run a latency test for a defined frame size and throughput percentage
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And 100k flow count
+      And <throughput> rate of previous scenario
+      When NFVbench API is ready
+      Then run is started and waiting for result
+      And push result to database
+
+     Examples: Frame sizes and throughput percentages
+      | frame_size | throughput |
+      | 64         | 70%        |
+      | 64         | 90%        |
+      | 768        | 70%        |
+      | 768        | 90%        |
+      | 1518       | 70%        |
+      | 1518       | 90%        |
+      | 9000       | 70%        |
+      | 9000       | 90%        |
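
The latency scenarios above reuse the rate found by the NDR scenarios: each throughput run stores its measured rate under a '<frame_size>_<flow_count>' key, and the '<throughput> rate of previous scenario' step converts the percentage into an absolute pps value (see percentage_previous_rate() in behave_tests/features/steps/steps.py below). A small sketch of that hand-off, with an illustrative rate:

    # Sketch of the rate hand-off between scenarios (numbers are illustrative)
    rates = {'1518_100k': 1_200_000}                    # pps found by the NDR search
    rate = str(int(rates['1518_100k'] * 0.70)) + 'pps'  # "70% rate of previous scenario"
    # -> '840000pps' is used as the fixed rate of the latency run
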
diff --git a/behave_tests/features/characterization-samples.feature b/behave_tests/features/characterization-samples.feature
new file mode 100644 (file)
index 0000000..6adb184
--- /dev/null
@@ -0,0 +1,44 @@
+@characterization
+Feature: characterization
+
+  @throughput
+  Scenario Outline: Run an NDR test for a defined frame size and flow count
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And <flow_count> flow count
+      And ndr rate
+      When NFVbench API is ready
+      Then 3 runs are started and waiting for maximum result
+      And push result to database
+      And extract offered rate result
+
+     Examples: Frame sizes and flow counts
+      | frame_size | flow_count |
+      | 64         | 100k       |
+      | 768        | 100k       |
+      | 1518       | 100k       |
+      | 9000       | 100k       |
+
+
+  @latency
+  Scenario Outline: Run a latency test for a defined frame size and throughput percentage
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And 100k flow count
+      And <throughput> rate of previous scenario
+      When NFVbench API is ready
+      Then run is started and waiting for result
+      And push result to database
+
+     Examples: Frame sizes and throughput percentages
+      | frame_size | throughput |
+      | 64         | 70%        |
+      | 64         | 90%        |
+      | 768        | 70%        |
+      | 768        | 90%        |
+      | 1518       | 70%        |
+      | 1518       | 90%        |
+      | 9000       | 70%        |
+      | 9000       | 90%        |
diff --git a/behave_tests/features/environment.py b/behave_tests/features/environment.py
new file mode 100644 (file)
index 0000000..ee1aa17
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+
+import json
+import os
+import time
+
+
+def before_all(context):
+    context.data = {'config': os.getenv('NFVBENCH_CONFIG_PATH', '/etc/nfvbench/nfvbench.cfg')}
+
+    context.data['PROJECT_NAME'] = os.getenv('PROJECT_NAME', 'nfvbench')
+    context.data['TEST_DB_EXT_URL'] = os.getenv('TEST_DB_EXT_URL')
+    context.data['TEST_DB_URL'] = os.getenv('TEST_DB_URL')
+    context.data['BASE_TEST_DB_URL'] = ''
+    if context.data['TEST_DB_URL']:
+        context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')
+    context.data['INSTALLER_TYPE'] = os.getenv('INSTALLER_TYPE')
+    context.data['DEPLOY_SCENARIO'] = os.getenv('DEPLOY_SCENARIO')
+    context.data['NODE_NAME'] = os.getenv('NODE_NAME', 'nfvbench')
+    context.data['BUILD_TAG'] = os.getenv('BUILD_TAG')
+
+
+def before_feature(context, feature):
+    context.rates = {}
+    context.results = {}
+    context.start_time = time.time()
+    context.CASE_NAME = feature.name
+
+
+def before_scenario(context, scenario):
+    context.tag = scenario.tags[0]
+    context.json = {'log_file': '/var/lib/xtesting/results/' + context.CASE_NAME + '/nfvbench.log'}
+    user_label = os.getenv('NFVBENCH_USER_LABEL', None)
+    if user_label:
+        context.json['user_label'] = user_label
+    loopvm_flavor = os.getenv('NFVBENCH_LOOPVM_FLAVOR_NAME', None)
+    if loopvm_flavor:
+        context.json['flavor_type'] = loopvm_flavor
+    context.synthesis = {}
+
+
+def after_feature(context, feature):
+    if context.results:
+        with open(os.path.join(
+                '/var/lib/xtesting/results/' + context.CASE_NAME + '/campaign_result.json'), "w") \
+                as outfile:
+            json.dump(context.results, outfile)
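
The hooks above are configured entirely through environment variables; a minimal sketch of a local setup, assuming illustrative values (only NFVBENCH_CONFIG_PATH, PROJECT_NAME and NODE_NAME have defaults):

    # Illustrative environment for a local behave run (all values are examples only)
    import os

    os.environ['NFVBENCH_CONFIG_PATH'] = '/etc/nfvbench/nfvbench.cfg'
    os.environ['PROJECT_NAME'] = 'nfvbench'
    os.environ['NODE_NAME'] = 'example-pod'
    os.environ['TEST_DB_URL'] = 'http://testdb.example.com/api/v1/results'  # hypothetical URL
    os.environ['INSTALLER_TYPE'] = 'example-installer'
    os.environ['DEPLOY_SCENARIO'] = 'example-scenario'
    os.environ['BUILD_TAG'] = 'example-build-tag'
    os.environ['NFVBENCH_USER_LABEL'] = 'demo'              # optional, read in before_scenario()
    os.environ['NFVBENCH_LOOPVM_FLAVOR_NAME'] = 'demo.flavor'  # optional, read in before_scenario()
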
diff --git a/behave_tests/features/non-regression.feature b/behave_tests/features/non-regression.feature
new file mode 100644 (file)
index 0000000..5774418
--- /dev/null
@@ -0,0 +1,48 @@
+@non-regression
+Feature: non-regression
+
+  @throughput
+  Scenario Outline: Run an NDR test for a defined frame size
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And 100k flow count
+      And ndr rate
+      When NFVbench API is ready
+      Then 3 runs are started and waiting for maximum result
+      And push result to database
+      And extract offered rate result
+      And verify throughput result is in same range as the previous result
+      And verify throughput result is in same range as the characterization result
+
+     Examples: Frame sizes
+      | frame_size |
+      | 64         |
+      | 768        |
+      | 1518       |
+      | 9000       |
+
+
+  @latency
+  Scenario Outline: Run a latency test for a defined frame size and throughput percentage
+      Given 10 sec run duration
+      And TRex is restarted
+      And <frame_size> frame size
+      And 100k flow count
+      And <throughput> rate of previous scenario
+      When NFVbench API is ready
+      Then run is started and waiting for result
+      And push result to database
+      And verify latency result is in same range as the previous result
+      And verify latency result is in same range as the characterization result
+
+     Examples: Frame sizes and throughput percentages
+      | frame_size | throughput |
+      | 64         | 70%        |
+      | 64         | 90%        |
+      | 768        | 70%        |
+      | 768        | 90%        |
+      | 1518       | 70%        |
+      | 1518       | 90%        |
+      | 9000       | 70%        |
+      | 9000       | 90%        |
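
The "same range" verifications above rely on a default 90% threshold implemented by the comparison helpers in steps.py: throughput must stay at or above 90% of the reference run, and latency must stay at or below 110% of it (computed as (2 - 0.90) times the reference latency). A worked example with illustrative numbers:

    # Worked example of the default non-regression thresholds (numbers are illustrative)
    previous_pps, current_pps = 5_000_000, 4_700_000
    assert current_pps >= 0.90 * previous_pps           # 4.7 Mpps >= 4.5 Mpps -> throughput OK

    previous_usec, current_usec = 100, 108
    assert current_usec <= (2 - 0.90) * previous_usec   # 108 usec <= 110 usec -> latency OK
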
diff --git a/behave_tests/features/steps/__init__.py b/behave_tests/features/steps/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
new file mode 100644 (file)
index 0000000..b20a9cc
--- /dev/null
@@ -0,0 +1,455 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+from functools import reduce
+
+from behave import given
+from behave import when
+from behave import then
+from requests import RequestException
+from retry import retry
+import json
+import requests
+import subprocess
+from subprocess import DEVNULL
+
+from nfvbench.summarizer import Formatter
+from nfvbench.traffic_gen.traffic_utils import parse_rate_str
+
+STATUS_ERROR = "ERROR"
+
+STATUS_OK = "OK"
+
+
+"""Given steps."""
+
+
+@given('PROJECT_NAME: {project_name}')
+def override_xtesting_project_name(context, project_name):
+    context.data['PROJECT_NAME'] = project_name
+
+
+@given('TEST_DB_EXT_URL: {test_db_ext_url}')
+def override_xtesting_test_db_ext_url(context, test_db_ext_url):
+    context.data['TEST_DB_EXT_URL'] = test_db_ext_url
+
+
+@given('TEST_DB_URL: {test_db_url}')
+def override_xtesting_test_db_url(context, test_db_url):
+    context.data['TEST_DB_URL'] = test_db_url
+    context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')
+
+
+@given('INSTALLER_TYPE: {installer_type}')
+def override_xtesting_installer_type(context, installer_type):
+    context.data['INSTALLER_TYPE'] = installer_type
+
+
+@given('DEPLOY_SCENARIO: {deploy_scenario}')
+def override_xtesting_deploy_scenario(context, deploy_scenario):
+    context.data['DEPLOY_SCENARIO'] = deploy_scenario
+
+
+@given('NODE_NAME: {node_name}')
+def override_xtesting_node_name(context, node_name):
+    context.data['NODE_NAME'] = node_name
+
+
+@given('BUILD_TAG: {build_tag}')
+def override_xtesting_build_tag(context, build_tag):
+    context.data['BUILD_TAG'] = build_tag
+
+
+@given('NFVbench config from file: {config_path}')
+def init_config(context, config_path):
+    context.data['config'] = config_path
+
+
+@given('a JSON NFVbench config')
+def init_config_from_json(context):
+    context.json.update(json.loads(context.text))
+
+
+@given('log file: {log_file_path}')
+def log_config(context, log_file_path):
+    context.json['log_file'] = log_file_path
+
+
+@given('json file: {json_file_path}')
+def json_config(context, json_file_path):
+    context.json['json'] = json_file_path
+
+
+@given('no clean up')
+def add_no_clean_up_flag(context):
+    context.json['no_cleanup'] = 'true'
+
+
+@given('TRex is restarted')
+def add_restart(context):
+    context.json['restart'] = 'true'
+
+
+@given('{label} label')
+def add_label(context, label):
+    context.json['label'] = label
+
+
+@given('{frame_size} frame size')
+def add_frame_size(context, frame_size):
+    context.json['frame_sizes'] = [frame_size]
+
+
+@given('{flow_count} flow count')
+def add_flow_count(context, flow_count):
+    context.json['flow_count'] = flow_count
+
+
+@given('{rate} rate')
+def add_rate(context, rate):
+    context.json['rate'] = rate
+
+
+@given('{duration} sec run duration')
+def add_duration(context, duration):
+    context.json['duration_sec'] = duration
+
+
+@given('{percentage_rate} rate of previous scenario')
+def add_percentage_rate(context, percentage_rate):
+    context.percentage_rate = percentage_rate
+    rate = percentage_previous_rate(context, percentage_rate)
+    context.json['rate'] = rate
+
+
+"""When steps."""
+
+
+@when('NFVbench API is ready')
+@when('NFVbench API is ready on host {host_ip}')
+@when('NFVbench API is ready on host {host_ip} and port {port:d}')
+def start_server(context, host_ip="127.0.0.1", port=7555):
+    context.host_ip = host_ip
+    context.port = port
+    try:
+        # check if API is already available
+        requests.get(
+            "http://{host_ip}:{port}/status".format(host_ip=context.host_ip, port=context.port))
+    except RequestException:
+        cmd = ["nfvbench", "-c", context.data['config'], "--server"]
+        if host_ip != "127.0.0.1":
+            cmd.append("--host")
+            cmd.append(host_ip)
+        if port != 7555:
+            cmd.append("--port")
+            cmd.append(str(port))
+
+        subprocess.Popen(cmd, stdout=DEVNULL, stderr=subprocess.STDOUT)
+
+    test_nfvbench_api(context)
+
+
+"""Then steps."""
+
+
+@then('run is started and waiting for result')
+@then('{repeat:d} runs are started and waiting for maximum result')
+def step_impl(context, repeat=1):
+    results = []
+    if 'json' not in context.json:
+        context.json['json'] = '/var/lib/xtesting/results/' + context.CASE_NAME + \
+                               '/nfvbench-' + context.tag + '-fs_' + \
+                               context.json['frame_sizes'][0] + '-fc_' + \
+                               context.json['flow_count'] + '-rate_' + \
+                               context.json['rate'] + '.json'
+    json_base_name = context.json['json']
+    for i in range(repeat):
+        if repeat > 1:
+            context.json['json'] = json_base_name[:-len('.json')] + '-' + str(i) + '.json'
+
+        url = "http://{ip}:{port}/start_run".format(ip=context.host_ip, port=context.port)
+        payload = json.dumps(context.json)
+        r = requests.post(url, data=payload, headers={'Content-Type': 'application/json'})
+        context.request_id = json.loads(r.text)["request_id"]
+        assert r.status_code == 200
+        result = wait_result(context)
+        results.append(result)
+        assert result["status"] == STATUS_OK
+
+
+    context.result = reduce(
+        lambda x, y:
+        x if extract_value(x, "total_tx_rate") > extract_value(y, "total_tx_rate") else y,
+        results)
+
+    total_tx_rate = extract_value(context.result, "total_tx_rate")
+    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
+    overall = extract_value(context.result, "overall")
+    avg_delay_usec = extract_value(overall, "avg_delay_usec")
+    # create a synthesis with offered pps and latency values
+    context.synthesis['total_tx_rate'] = total_tx_rate
+    context.synthesis['avg_delay_usec'] = avg_delay_usec
+
+
+@then('extract offered rate result')
+def save_rate_result(context):
+    total_tx_rate = extract_value(context.result, "total_tx_rate")
+    context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']] = total_tx_rate
+
+
+@then('verify throughput result is in same range as the previous result')
+@then('verify throughput result is greater than {threshold} of the previous result')
+def get_throughput_result_from_database(context, threshold='90%'):
+    last_result = get_last_result(context)
+
+    if last_result:
+        compare_throughput_values(context, last_result, threshold)
+
+
+@then('verify latency result is in same range as the previous result')
+@then('verify latency result is greater than {threshold} of the previous result')
+def get_latency_result_from_database(context, threshold='90%'):
+    last_result = get_last_result(context)
+
+    if last_result:
+        compare_latency_values(context, last_result, threshold)
+
+@then(
+    'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
+def compare_throughput_pps_result_with_range_values(context, min_reference_value,
+                                                    max_reference_value):
+    context.unit = 'pps'
+    reference_values = [min_reference_value + 'pps', max_reference_value + 'pps']
+    throughput_comparison(context, reference_values=reference_values)
+
+
+@then(
+    'verify result is in [{min_reference_value}bps, {max_reference_value}bps] range for throughput')
+def compare_throughput_bps_result_with_range_values(context, min_reference_value,
+                                                    max_reference_value):
+    context.unit = 'bps'
+    reference_values = [min_reference_value + 'bps', max_reference_value + 'bps']
+    throughput_comparison(context, reference_values=reference_values)
+
+
+@then('verify result is in {reference_values} range for latency')
+def compare_result_with_range_values(context, reference_values):
+    latency_comparison(context, reference_values=reference_values)
+
+
+@then('verify throughput result is in same range as the characterization result')
+@then('verify throughput result is greater than {threshold} of the characterization result')
+def get_characterization_throughput_result_from_database(context, threshold='90%'):
+    last_result = get_last_result(context, True)
+    if not last_result:
+        raise AssertionError("No characterization result found.")
+    compare_throughput_values(context, last_result, threshold)
+
+
+@then('verify latency result is in same range as the characterization result')
+@then('verify latency result is greater than {threshold} of the characterization result')
+def get_characterization_latency_result_from_database(context, threshold='90%'):
+    last_result = get_last_result(context, True)
+    if not last_result:
+        raise AssertionError("No characterization result found.")
+    compare_latency_values(context, last_result, threshold)
+
+@then('push result to database')
+def push_result_database(context):
+    if context.tag == "latency":
+        # override input rate value with percentage one to avoid no match
+        # if pps is not accurate with previous one
+        context.json["rate"] = context.percentage_rate
+    json_result = {"synthesis": context.synthesis, "input": context.json, "output": context.result}
+
+    if context.tag not in context.results:
+        context.results[context.tag] = [json_result]
+    else:
+        context.results[context.tag].append(json_result)
+
+
+"""Utils methods."""
+
+
+@retry(AssertionError, tries=10, delay=5.0, logger=None)
+def test_nfvbench_api(context):
+    try:
+        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
+        assert r.status_code == 200
+        assert json.loads(r.text)["error_message"] == "no pending NFVbench run"
+    except RequestException as exc:
+        raise AssertionError("Fail to access NFVbench API") from exc
+
+
+@retry(AssertionError, tries=1000, delay=2.0, logger=None)
+def wait_result(context):
+    r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
+    context.raw_result = r.text
+    result = json.loads(context.raw_result)
+    assert r.status_code == 200
+    assert result["status"] == STATUS_OK or result["status"] == STATUS_ERROR
+    return result
+
+
+def percentage_previous_rate(context, rate):
+    previous_rate = context.rates[context.json['frame_sizes'][0] + '_' + context.json['flow_count']]
+
+    if rate.endswith('%'):
+        rate_percent = convert_percentage_str_to_float(rate)
+        return str(int(previous_rate * rate_percent)) + 'pps'
+    raise Exception('Unknown rate string format %s' % rate)
+
+
+def convert_percentage_str_to_float(percentage):
+    float_percent = float(percentage.replace('%', '').strip())
+    if float_percent <= 0 or float_percent > 100.0:
+        raise Exception('%s is out of valid range (must be 1-100%%)' % percentage)
+    return float_percent / 100
+
+
+def compare_throughput_values(context, last_result, threshold):
+    assert last_result["output"]["status"] == context.result["status"]
+    if last_result["output"]["status"] == "OK":
+        old_throughput = extract_value(last_result["output"], "total_tx_rate")
+        throughput_comparison(context, old_throughput, threshold=threshold)
+
+
+def compare_latency_values(context, last_result, threshold):
+    assert last_result["output"]["status"] == context.result["status"]
+    if last_result["output"]["status"] == "OK":
+        old_latency = extract_value(extract_value(last_result["output"], "overall"),
+                                    "avg_delay_usec")
+        latency_comparison(context, old_latency, threshold=threshold)
+
+
+def throughput_comparison(context, old_throughput_pps=None, threshold=None, reference_values=None):
+    current_throughput_pps = extract_value(context.result, "total_tx_rate")
+
+    if old_throughput_pps:
+        if not current_throughput_pps >= convert_percentage_str_to_float(
+                threshold) * old_throughput_pps:
+            raise AssertionError(
+                "Current run throughput {current_throughput_pps} is not over {threshold} "
+                " of previous value ({old_throughput_pps}pps)".format(
+                    current_throughput_pps=Formatter.suffix('pps')(
+                        Formatter.standard(current_throughput_pps)),
+                    threshold=threshold, old_throughput_pps=Formatter.suffix('pps')(
+                        Formatter.standard(old_throughput_pps))))
+    elif reference_values:
+        if context.unit == 'bps':
+            current_throughput = extract_value(context.result, "offered_tx_rate_bps")
+            reference_values = [int(parse_rate_str(x)['rate_bps']) for x in reference_values]
+            formatted_current_throughput = Formatter.bits(current_throughput)
+            formatted_min_reference_value = Formatter.bits(reference_values[0])
+            formatted_max_reference_value = Formatter.bits(reference_values[1])
+        else:
+            current_throughput = current_throughput_pps
+            reference_values = [int(parse_rate_str(x)['rate_pps']) for x in reference_values]
+            formatted_current_throughput = Formatter.suffix('pps')(
+                Formatter.standard(current_throughput))
+            formatted_min_reference_value = Formatter.suffix('pps')(
+                Formatter.standard(reference_values[0]))
+            formatted_max_reference_value = Formatter.suffix('pps')(
+                Formatter.standard(reference_values[1]))
+        if not reference_values[0] <= int(current_throughput) <= reference_values[1]:
+            raise AssertionError(
+                "Current run throughput {current_throughput} is not in reference values "
+                "[{min_reference_value}, {max_reference_value}]".format(
+                    current_throughput=formatted_current_throughput,
+                    min_reference_value=formatted_min_reference_value,
+                    max_reference_value=formatted_max_reference_value))
+
+
+def latency_comparison(context, old_latency=None, threshold=None, reference_values=None):
+    overall = extract_value(context.result, "overall")
+    current_latency = extract_value(overall, "avg_delay_usec")
+
+    if old_latency:
+        if not current_latency <= (2 - convert_percentage_str_to_float(threshold)) * old_latency:
+            threshold = str(200 - int(threshold.strip('%'))) + '%'
+            raise AssertionError(
+                "Current run latency {current_latency}usec is not less than {threshold} of "
+                "previous value ({old_latency}usec)".format(
+                    current_latency=Formatter.standard(current_latency), threshold=threshold,
+                    old_latency=Formatter.standard(old_latency)))
+    elif reference_values:
+        if not reference_values[0] <= current_latency <= reference_values[1]:
+            raise AssertionError(
+                "Current run latency {current_latency}usec is not in reference values "
+                "[{min_reference_value}, {max_reference_value}]".format(
+                    current_latency=Formatter.standard(current_latency),
+                    min_reference_value=Formatter.standard(reference_values[0]),
+                    max_reference_value=Formatter.standard(reference_values[1])))
+
+
+def get_result_from_input_values(input, result):
+    # Select required keys (other keys may be unset or inconsistent between scenarios)
+    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
+    if 'user_label' in result:
+        required_keys.append('user_label')
+    if 'flavor_type' in result:
+        required_keys.append('flavor_type')
+    subset_input = dict((k, input[k]) for k in required_keys if k in input)
+    subset_result = dict((k, result[k]) for k in required_keys if k in result)
+    return subset_input == subset_result
+
+
+def extract_value(obj, key):
+    """Pull all values of specified key from nested JSON."""
+    arr = []
+
+    def extract(obj, arr, key):
+        """Recursively search for values of key in JSON tree."""
+        if isinstance(obj, dict):
+            for k, v in obj.items():
+                if k == key:
+                    arr.append(v)
+                elif isinstance(v, (dict, list)):
+                    extract(v, arr, key)
+        elif isinstance(obj, list):
+            for item in obj:
+                extract(item, arr, key)
+        return arr
+
+    results = extract(obj, arr, key)
+    return results[0]
+
+
+def get_last_result(context, reference=None, page=None):
+    if reference:
+        case_name = 'characterization'
+    else:
+        case_name = context.CASE_NAME
+    url = context.data['TEST_DB_URL'] + '?project={project_name}&case={case_name}'.format(
+        project_name=context.data['PROJECT_NAME'], case_name=case_name)
+    if context.data['INSTALLER_TYPE']:
+        url += '&installer={installer_name}'.format(installer_name=context.data['INSTALLER_TYPE'])
+    if context.data['NODE_NAME']:
+        url += '&pod={pod_name}'.format(pod_name=context.data['NODE_NAME'])
+    url += '&criteria=PASS'
+    if page:
+        url += '&page={page}'.format(page=page)
+    last_results = requests.get(url)
+    assert last_results.status_code == 200
+    last_results = json.loads(last_results.text)
+    for result in last_results["results"]:
+        for tagged_result in result["details"]["results"][context.tag]:
+            if get_result_from_input_values(tagged_result["input"], context.json):
+                return tagged_result
+    if last_results["pagination"]["current_page"] < last_results["pagination"]["total_pages"]:
+        page = last_results["pagination"]["current_page"] + 1
+        return get_last_result(context, reference, page)
+    return None
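
extract_value() above returns the first match found by a depth-first walk of the nested NFVbench result dictionary, which is how fields such as total_tx_rate and avg_delay_usec are pulled out of the JSON output. A short illustration on a made-up result fragment:

    # Illustration of extract_value() on a made-up result fragment
    from behave_tests.features.steps.steps import extract_value

    sample = {'result': {'total_tx_rate': 840000,
                         'overall': {'avg_delay_usec': 35, 'max_delay_usec': 120}}}
    assert extract_value(sample, 'total_tx_rate') == 840000
    assert extract_value(extract_value(sample, 'overall'), 'avg_delay_usec') == 35
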
index be93d8e..8676bbd 100644 (file)
@@ -32,20 +32,22 @@ RUN apt-get update && apt-get install -y \
        && python3 get-pip.py \
        && pip3 install -U pbr \
        && pip3 install -U setuptools \
-       && cd / \
+       && cd /opt \
        # Note: do not clone with --depth 1 as it will cause pbr to fail extracting the nfvbench version
        # from the git tag
        && git clone https://gerrit.opnfv.org/gerrit/nfvbench \
-       && cd /nfvbench && pip3 install -e . \
+       && cd nfvbench && pip3 install -e . \
        && wget -O nfvbenchvm-$VM_IMAGE_VER.qcow2 http://artifacts.opnfv.org/nfvbench/images/nfvbenchvm_centos-$VM_IMAGE_VER.qcow2 \
        # Override Xtesting testcases.yaml file by NFVbench default one
        && cp xtesting/testcases.yaml /usr/local/lib/python3.8/dist-packages/xtesting/ci/testcases.yaml \
+       # Temporary override waiting for PR approval : https://gerrit.opnfv.org/gerrit/c/functest-xtesting/+/72431
+       && cp xtesting/behaveframework.py /usr/local/lib/python3.8/dist-packages/xtesting/core/behaveframework.py \
        && python3 ./docker/cleanup_generators.py \
-       && rm -rf /nfvbench/.git \
+       && rm -rf /opt/nfvbench/.git \
        && apt-get remove -y wget git python3-dev libpython3.8-dev gcc \
        && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/*
 
 ENV TREX_EXT_LIBS "/opt/trex/$TREX_VER/external_libs"
 
 
-ENTRYPOINT ["/nfvbench/docker/nfvbench-entrypoint.sh"]
+ENTRYPOINT ["/opt/nfvbench/docker/nfvbench-entrypoint.sh"]
index 812816d..913e5ba 100755 (executable)
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
-if [ -z "$1" ] ||  ([ $1 != 'start_rest_server' ] &&  [ $1 != 'run_tests' ]); then
+if [ -z "$1" ] ||  ([ $1 != 'start_rest_server' ] &&  [ $1 != 'run_tests' ] &&  [ $1 != 'zip_campaign' ]); then
         tail -f /dev/null
 elif [ $1 == 'run_tests' ]; then
         PARAMS=""
@@ -22,6 +22,8 @@ elif [ $1 == 'run_tests' ]; then
             PARAMS+="$var "
         done
         eval "run_tests $PARAMS"
+elif [ $1 == 'zip_campaign' ]; then
+        zip_campaign
 else
         PARAMS="--server"
         if [ -n "$HOST" ]; then
index 7acb783..bd86810 100644 (file)
@@ -246,8 +246,7 @@ class NFVBench(object):
             config.cache_size = config.flow_count
 
         # The size must be capped to 10000 (where does this limit come from?)
-        if config.cache_size > 10000:
-            config.cache_size = 10000
+        config.cache_size = min(config.cache_size, 10000)
 
         config.duration_sec = float(config.duration_sec)
         config.interval_sec = float(config.interval_sec)
index 80a0817..512422d 100644 (file)
@@ -127,11 +127,11 @@ def get_intel_pci(nic_slot=None, nic_ports=None):
         trex_base_dir = '/opt/trex'
         contents = os.listdir(trex_base_dir)
         trex_dir = os.path.join(trex_base_dir, contents[0])
-        process = subprocess.Popen(['python', 'dpdk_setup_ports.py', '-s'],
-                                   cwd=trex_dir,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
-        devices, _ = process.communicate()
+        with subprocess.Popen(['python', 'dpdk_setup_ports.py', '-s'],
+                              cwd=trex_dir,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE) as process:
+            devices, _ = process.communicate()
     except Exception:
         devices = ''
 
@@ -147,10 +147,10 @@ def get_intel_pci(nic_slot=None, nic_ports=None):
             intf_name = glob.glob("/sys/bus/pci/devices/%s/net/*" % port[0])
             if intf_name:
                 intf_name = intf_name[0][intf_name[0].rfind('/') + 1:]
-                process = subprocess.Popen(['ip', '-o', '-d', 'link', 'show', intf_name],
-                                           stdout=subprocess.PIPE,
-                                           stderr=subprocess.PIPE)
-                intf_info, _ = process.communicate()
+                with subprocess.Popen(['ip', '-o', '-d', 'link', 'show', intf_name],
+                                      stdout=subprocess.PIPE,
+                                      stderr=subprocess.PIPE) as process:
+                    intf_info, _ = process.communicate()
                 if re.search('team_slave|bond_slave', intf_info.decode("utf-8")):
                     device_ports_list[port[0].split('.')[0]]['busy'] = True
         for port in matches:
index 732d807..b002c68 100644 (file)
@@ -23,4 +23,6 @@ flask>=0.12
 fluent-logger>=0.5.3
 netaddr>=0.7.19
 xtesting>=0.87.0
-hdrhistogram>=0.8.0
\ No newline at end of file
+hdrhistogram>=0.8.0
+behave>=1.2.6
+retry>=0.9.2
\ No newline at end of file
index e1b1ddf..d819ecb 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,11 +35,14 @@ classifier =
 [files]
 packages =
     nfvbench
+    behave_tests
 
 [entry_points]
 console_scripts =
     nfvbench = nfvbench.nfvbench:main
     nfvbench_client = client.nfvbench_client:main
+xtesting.testcase =
+    nfvbench_behaveframework = behave_tests.behavedriver:BehaveDriver
 
 [compile_catalog]
 directory = nfvbench/locale
index ac7daf1..cb8a081 100644 (file)
@@ -45,6 +45,7 @@ except ImportError:
     api_mod.Ether = STLDummy
     api_mod.ARP = STLDummy
     api_mod.IP = STLDummy
+    api_mod.ARP = STLDummy
     api_mod.STLClient = STLDummy
     api_mod.STLFlowLatencyStats = STLDummy
     api_mod.STLFlowStats = STLDummy
index 9e3f1b9..125032f 100644 (file)
@@ -1,5 +1,8 @@
 docker_args:
   env: {}
+  params:
+    net: host
+    privileged: true
   volumes:
     - /lib/modules/$(uname -r):/lib/modules/$(uname -r)
     - /usr/src/kernels:/usr/src/kernels -v /dev:/dev
index 4643a32..37fa6c3 100644 (file)
@@ -16,5 +16,7 @@
         - container: nfvbench
           tests:
             - 10kpps-pvp-run
+            - characterization
+            - non-regression
           properties:
-            execution-type: SEQUENTIALLY
+            execution-type: SEQUENTIALLY
\ No newline at end of file
diff --git a/xtesting/behaveframework.py b/xtesting/behaveframework.py
new file mode 100644 (file)
index 0000000..651240d
--- /dev/null
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Define classes required to run any Behave test suites."""
+
+from __future__ import division
+
+import logging
+import os
+import time
+
+import json
+import six
+
+from behave.__main__ import main as behave_main
+
+from xtesting.core import testcase
+
+__author__ = "Deepak Chandella <deepak.chandella@orange.com>"
+
+
+class BehaveFramework(testcase.TestCase):
+    """BehaveFramework runner."""
+    # pylint: disable=too-many-instance-attributes
+
+    __logger = logging.getLogger(__name__)
+    dir_results = "/var/lib/xtesting/results"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.json_file = os.path.join(self.res_dir, 'output.json')
+        self.total_tests = 0
+        self.pass_tests = 0
+        self.fail_tests = 0
+        self.skip_tests = 0
+        self.response = None
+
+    def parse_results(self):
+        """Parse output.json and get the details in it."""
+        with open(self.json_file) as stream_:
+            self.response = json.load(stream_)
+            if self.response:
+                self.total_tests = len(self.response)
+            for item in self.response:
+                if item['status'] == 'passed':
+                    self.pass_tests += 1
+                elif item['status'] == 'failed':
+                    self.fail_tests += 1
+                elif item['status'] == 'skipped':
+                    self.skip_tests += 1
+            self.result = 100 * (
+                self.pass_tests / self.total_tests)
+            self.details = {}
+            self.details['total_tests'] = self.total_tests
+            self.details['pass_tests'] = self.pass_tests
+            self.details['fail_tests'] = self.fail_tests
+            self.details['skip_tests'] = self.skip_tests
+            self.details['tests'] = self.response
+
+    def run(self, **kwargs):
+        """Run the BehaveFramework feature files
+
+        Here are the steps:
+           * create the output directories if required,
+           * run behave features with parameters
+           * get the results in output.json,
+
+        Args:
+            kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            EX_OK if all suites ran well.
+            EX_RUN_ERROR otherwise.
+        """
+        try:
+            suites = kwargs["suites"]
+            tags = kwargs.get("tags", [])
+            console = kwargs["console"] if "console" in kwargs else False
+        except KeyError:
+            self.__logger.exception("Mandatory args were not passed")
+            return self.EX_RUN_ERROR
+        if not os.path.exists(self.res_dir):
+            try:
+                os.makedirs(self.res_dir)
+            except Exception:  # pylint: disable=broad-except
+                self.__logger.exception("Cannot create %s", self.res_dir)
+                return self.EX_RUN_ERROR
+        config = ['--tags=' + ','.join(tags),
+                  '--junit', '--junit-directory={}'.format(self.res_dir),
+                  '--format=json', '--outfile={}'.format(self.json_file)]
+        if six.PY3:
+            html_file = os.path.join(self.res_dir, 'output.html')
+            config += ['--format=behave_html_formatter:HTMLFormatter',
+                       '--outfile={}'.format(html_file)]
+        if console:
+            config += ['--format=pretty',
+                       '--outfile=-']
+        for feature in suites:
+            config.append(feature)
+        self.start_time = time.time()
+        behave_main(config)
+        self.stop_time = time.time()
+
+        try:
+            self.parse_results()
+            self.__logger.info("Results were successfully parsed")
+        except Exception:  # pylint: disable=broad-except
+            self.__logger.exception("Cannot parse results")
+            return self.EX_RUN_ERROR
+        return self.EX_OK
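
For the non-regression case defined in testcases.yaml below, the config list assembled by run() above translates into roughly the following behave arguments (the result directory follows the dir_results default and is shown here for illustration only):

    # Approximate argument list passed to behave_main() for the non-regression case
    config = [
        '--tags=non-regression',
        '--junit', '--junit-directory=/var/lib/xtesting/results/non-regression',
        '--format=json', '--outfile=/var/lib/xtesting/results/non-regression/output.json',
        '--format=behave_html_formatter:HTMLFormatter',
        '--outfile=/var/lib/xtesting/results/non-regression/output.html',
        '--format=pretty', '--outfile=-',
        '/opt/nfvbench/behave_tests/features/non-regression.feature',
    ]
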
index cbb5c45..eea60b9 100644 (file)
@@ -4,6 +4,65 @@ tiers:
         name: nfvbench
         order: 1
         description: 'Data Plane Performance Testing'
+        testcases:
+            -
+                case_name: characterization
+                project_name: nfvbench
+                criteria: 100
+                blocking: true
+                clean_flag: false
+                description: ''
+                run:
+                    name: 'nfvbench_behaveframework'
+                    args:
+                        suites:
+                            - /opt/nfvbench/behave_tests/features/characterization-full.feature
+                        tags:
+                            - characterization
+                        console:
+                            - true
+            -
+                case_name: non-regression
+                project_name: nfvbench
+                criteria: 100
+                blocking: true
+                clean_flag: false
+                description: ''
+                run:
+                    name: 'nfvbench_behaveframework'
+                    args:
+                        suites:
+                            - /opt/nfvbench/behave_tests/features/non-regression.feature
+                        tags:
+                            - non-regression
+                        console:
+                            - true
+
+    -
+        name: nfvbench-rapid-characterization
+        order: 2
+        description: 'Data Plane Performance Testing'
+        testcases:
+            -
+                case_name: rapid-characterization
+                project_name: nfvbench
+                criteria: 100
+                blocking: true
+                clean_flag: false
+                description: ''
+                run:
+                    name: 'nfvbench_behaveframework'
+                    args:
+                        suites:
+                            - /opt/nfvbench/behave_tests/features/characterization-samples.feature
+                        tags:
+                            - characterization
+                        console:
+                            - true
+    -
+        name: nfvbench-demo
+        order: 3
+        description: 'Data Plane Performance Testing'
         testcases:
             -
                 case_name: 10kpps-pvp-run
@@ -17,4 +76,5 @@ tiers:
                     args:
                         cmd:
                             - nfvbench -c /tmp/nfvbench/nfvbench.cfg --rate 10kpps
-
+                        console:
+                            - true
\ No newline at end of file