integration: Tunneling protocols support update
diff --git a/vsperf b/vsperf
index 44e4542..a141742 100755 (executable)
--- a/vsperf
+++ b/vsperf
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,19 +23,20 @@ import sys
 import argparse
 import re
 import time
+import csv
 import datetime
 import shutil
 import unittest
-import xmlrunner
 import locale
 import copy
 import glob
 import subprocess
-
-sys.dont_write_bytecode = True
-
+import ast
+import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
 from conf import settings
-from conf import get_test_param
+import core.component_factory as component_factory
 from core.loader import Loader
 from testcases import PerformanceTestCase
 from testcases import IntegrationTestCase
@@ -44,8 +45,7 @@ from tools import networkcard
 from tools import functions
 from tools.pkt_gen import trafficgen
 from tools.opnfvdashboard import opnfvdashboard
-from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
-import core.component_factory as component_factory
+sys.dont_write_bytecode = True
 
 VERBOSITY_LEVELS = {
     'debug': logging.DEBUG,
@@ -63,33 +63,67 @@ _TEMPLATE_RST = {'head'  : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
                  'tmp'   : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
                 }
 
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+                   "The following performance matrix was generated with the results of all the\n"\
+                   "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
 
 _LOGGER = logging.getLogger()
 
+def parse_param_string(values):
+    """
+    Parse and split a single '--test-params' argument.
+
+    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ';' separated list,
+    e.g. --test-params 'x=z; y=(a,b)'
+    """
+    results = {}
+
+    if values == '':
+        return {}
+
+    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+        param = param.strip()
+        value = value.strip()
+        if param:
+            if value:
+                # values are passed inside string from CLI, so we must retype them accordingly
+                try:
+                    results[param] = ast.literal_eval(value)
+                except ValueError:
+                    # for backward compatibility, we have to accept strings without quotes
+                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+                                    param, str(value))
+                    results[param] = str(value)
+            else:
+                results[param] = True
+    return results
+
+
 def parse_arguments():
     """
     Parse command line arguments.
     """
     class _SplitTestParamsAction(argparse.Action):
         """
-        Parse and split the '--test-params' argument.
+        Parse and split '--test-params' arguments.
 
-        This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
-        values. For multiple overrides use a ; separated list for
+        This expects either a single ';' separated list of overrides
+        given as 'x=y', 'x=y,z' or 'x' (implicit true) values,
         e.g. --test-params 'x=z; y=(a,b)'
+        or a list of such ';' separated lists with overrides for
+        multiple tests,
+        e.g. --test-params "['x=z; y=(a,b)','x=z']"
         """
         def __call__(self, parser, namespace, values, option_string=None):
-            results = {}
-
-            for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
-                param = param.strip()
-                value = value.strip()
-                if len(param):
-                    if len(value):
-                        results[param] = value
-                    else:
-                        results[param] = True
-
+            if values.startswith('['):
+                input_list = ast.literal_eval(values)
+                parameter_list = [parse_param_string(test_params)
+                                  for test_params in input_list]
+            else:
+                parameter_list = parse_param_string(values)
+            results = {'_PARAMS_LIST': parameter_list}
             setattr(namespace, self.dest, results)
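
The two forms accepted by the new '--test-params' handling can be traced end
to end in a short, self-contained sketch. The function body below copies the
parse_param_string() logic from the hunk above; the parameter names in the
sample calls are only illustrative:

    import ast
    import re

    def parse_param_string(values):
        """Copy of the parsing logic introduced above."""
        results = {}
        if values == '':
            return {}
        for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
            param = param.strip()
            value = value.strip()
            if param:
                if value:
                    try:
                        # retype values passed as strings from the CLI
                        results[param] = ast.literal_eval(value)
                    except ValueError:
                        results[param] = str(value)
                else:
                    results[param] = True
        return results

    # a single ';' separated list yields one dictionary of overrides
    print(parse_param_string("TRAFFICGEN_DURATION=10; GUEST_LOOPBACK=['l2fwd']"))
    # {'TRAFFICGEN_DURATION': 10, 'GUEST_LOOPBACK': ['l2fwd']}

    # a list of such lists yields one dictionary per test; this is the value
    # stored under the '_PARAMS_LIST' key by _SplitTestParamsAction
    raw = "['TRAFFICGEN_DURATION=10','TRAFFICGEN_PKT_SIZES=(64,)']"
    print([parse_param_string(item) for item in ast.literal_eval(raw)])
    # [{'TRAFFICGEN_DURATION': 10}, {'TRAFFICGEN_PKT_SIZES': (64,)}]
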
 
     class _ValidateFileAction(argparse.Action):
@@ -121,7 +155,7 @@ def parse_arguments():
     def list_logging_levels():
         """Give a summary of all available logging levels.
 
-       :return: List of verbosity level names in decreasing order of
+        :return: List of verbosity level names in decreasing order of
             verbosity
         """
         return sorted(VERBOSITY_LEVELS.keys(),
@@ -142,6 +176,8 @@ def parse_arguments():
                         help='list all system forwarding applications and exit')
     parser.add_argument('--list-vnfs', action='store_true',
                         help='list all system vnfs and exit')
+    parser.add_argument('--list-loadgens', action='store_true',
+                        help='list all background load generators and exit')
     parser.add_argument('--list-settings', action='store_true',
                         help='list effective settings configuration and exit')
     parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -169,6 +205,7 @@ def parse_arguments():
     group.add_argument('--vswitch', help='vswitch implementation to use')
     group.add_argument('--fwdapp', help='packet forwarding application to use')
     group.add_argument('--vnf', help='vnf to use')
+    group.add_argument('--loadgen', help='loadgen to use')
     group.add_argument('--sysmetrics', help='system metrics logger to use')
     group = parser.add_argument_group('test behavior options')
     group.add_argument('--xunit', action='store_true',
@@ -181,9 +218,14 @@ def parse_arguments():
                        help='settings file')
     group.add_argument('--test-params', action=_SplitTestParamsAction,
                        help='csv list of test parameters: key=val; e.g. '
-                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
-                       'GUEST_LOOPBACK=["l2fwd"] ...')
+                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+                       'GUEST_LOOPBACK=["l2fwd"] ...'
+                       ' or a list of csv lists of test parameters: key=val; e.g. '
+                       '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+                       '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
     group.add_argument('--opnfvpod', help='name of POD in opnfv')
+    group.add_argument('--matrix', help='enable performance matrix analysis',
+                       action='store_true', default=False)
 
     args = vars(parser.parse_args())
 
@@ -280,6 +322,39 @@ def check_and_set_locale():
         _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                         system_locale, locale.getdefaultlocale())
 
+def get_vswitch_names(rst_files):
+    """ Function will return a list of vSwitches detected in given ``rst_files``.
+    """
+    vswitch_names = set()
+    if rst_files:
+        try:
+            output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
+            for line in output:
+                match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
+                if match:
+                    vswitch_names.add(match.group(1))
+
+            if vswitch_names:
+                return list(vswitch_names)
+
+        except subprocess.CalledProcessError:
+            _LOGGER.warning('Cannot detect vSwitches used during testing.')
+
+    # fallback to the default value
+    return ['vSwitch']
+
+def get_build_tag():
+    """ Function will return a Jenkins job ID environment variable.
+    """
+
+    try:
+        build_tag = os.environ['BUILD_TAG']
+
+    except KeyError:
+        _LOGGER.warning('Cannot detect Jenkins job ID')
+        build_tag = "none"
+
+    return build_tag
 
 def generate_final_report():
     """ Function will check if partial test results are available
@@ -289,18 +364,15 @@ def generate_final_report():
     path = settings.getValue('RESULTS_PATH')
     # check if there are any results in rst format
     rst_results = glob.glob(os.path.join(path, 'result*rst'))
-    if len(rst_results):
+    pkt_processors = get_vswitch_names(rst_results)
+    if rst_results:
         try:
-            test_report = os.path.join(path, '{}_{}'.format(settings.getValue('VSWITCH'), _TEMPLATE_RST['final']))
+            test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
             # create report caption directly - it is not worth to execute jinja machinery
-            if settings.getValue('VSWITCH').lower() != 'none':
-                pkt_processor = Loader().get_vswitches()[settings.getValue('VSWITCH')].__doc__.strip().split('\n')[0]
-            else:
-                pkt_processor = Loader().get_pktfwds()[settings.getValue('PKTFWD')].__doc__.strip().split('\n')[0]
             report_caption = '{}\n{} {}\n{}\n\n'.format(
                 '============================================================',
                 'Performance report for',
-                pkt_processor,
+                ', '.join(pkt_processors),
                 '============================================================')
 
             with open(_TEMPLATE_RST['tmp'], 'w') as file_:
@@ -312,7 +384,7 @@ def generate_final_report():
             if retval == 0 and os.path.isfile(test_report):
                 _LOGGER.info('Overall test report written to "%s"', test_report)
             else:
-                _LOGGER.error('Generatrion of overall test report has failed.')
+                _LOGGER.error('Generation of overall test report has failed.')
 
             # remove temporary file
             os.remove(_TEMPLATE_RST['tmp'])
@@ -321,6 +393,69 @@ def generate_final_report():
             _LOGGER.error('Generation of overall test report has failed.')
 
 
+def generate_performance_matrix(selected_tests, results_path):
+    """
+    Loads the results of all the currently run tests, compares them
+    based on the MATRIX_METRIC, outputs and saves the generated table.
+    :selected_tests: list of currently run tests
+    :results_path: directory path to the results of the current tests
+    """
+    _LOGGER.info('Performance Matrix:')
+    test_list = []
+
+    for test in selected_tests:
+        test_name = test.get('Name', '<Name not set>')
+        test_deployment = test.get('Deployment', '<Deployment not set>')
+        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+    test_params = {}
+    output = []
+    all_params = settings.getValue('_PARAMS_LIST')
+    for i, test in enumerate(test_list):
+        if isinstance(all_params, list):
+            list_index = i
+            if i >= len(all_params):
+                list_index = len(all_params) - 1
+            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+                test_params.update(all_params[list_index])
+            else:
+                test_params = all_params[list_index]
+        else:
+            test_params = all_params
+        settings.setValue('TEST_PARAMS', test_params)
+        test['test_params'] = copy.deepcopy(test_params)
+        try:
+            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+                                                      test['test_name'], test['test_deployment'])) as csvfile:
+                reader = list(csv.DictReader(csvfile))
+                test['csv_data'] = reader[0]
+        # pylint: disable=broad-except
+        except (Exception) as ex:
+            _LOGGER.error("Result file not found: %s", ex)
+
+    metric = settings.getValue('MATRIX_METRIC')
+    change = {}
+    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+        _LOGGER.error("Incorrect format of test results")
+        return
+    for i, test in enumerate(test_list):
+        if test['csv_data']:
+            change[i] = float(test['csv_data'][metric])/\
+                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
+            output.append([i, test['test_name'], float(test['csv_data'][metric]),
+                           change[i], str(test['test_params'])[1:-1]])
+        else:
+            change[i] = 0
+            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+                                                                   tablefmt="rst", floatfmt="0.3f")))
+        _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
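
The "Change [%]" column produced by generate_performance_matrix() is relative
to the first executed test: change = value / (baseline / 100) - 100. A worked
example with made-up throughput figures (the metric name is just one possible
MATRIX_METRIC value):

    from tabulate import tabulate

    metric = 'throughput_rx_fps'        # illustrative MATRIX_METRIC
    values = [10.0e6, 12.0e6, 9.0e6]    # run 0 is the baseline

    rows = []
    for i, val in enumerate(values):
        change = val / (values[0] / 100) - 100   # same formula as above
        rows.append([i, 'example_test', val, change])

    print(tabulate(rows, headers=('ID', 'Name', metric, 'Change [%]'),
                   tablefmt='grid', floatfmt='0.3f'))
    # the baseline row shows 0.000, the others +20.000 and -10.000 per cent
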
 def enable_sriov(nic_list):
     """ Enable SRIOV for given enhanced PCI IDs
 
@@ -338,18 +473,20 @@ def enable_sriov(nic_list):
                 sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
 
     # sriov is required for some NICs
-    if len(sriov_nic):
+    if sriov_nic:
         for nic in sriov_nic:
             # check if SRIOV is supported and enough virt interfaces are available
             if not networkcard.is_sriov_supported(nic) \
                 or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
                 # if not, enable and set appropriate number of VFs
                 if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
-                    _LOGGER.error("SRIOV cannot be enabled for NIC %s", nic)
-                    raise
+                    raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
                 else:
                     _LOGGER.debug("SRIOV enabled for NIC %s", nic)
 
+                # ensure that path to the bind tool is valid
+                functions.settings_update_paths()
+
                 # WORKAROUND: it has been observed with IXGBE(VF) driver,
                 # that NIC doesn't correctly dispatch traffic to VFs based
                 # on their MAC address. Unbind and bind to the same driver
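
The bare 'raise' statements above are replaced with RuntimeErrors carrying the
failing PCI ID. The VF manipulation itself is delegated to tools/networkcard.py;
as a rough sketch under the usual Linux sysfs layout (the PCI address below is
made up, and the real helper performs more validation):

    def set_numvfs_sketch(pci_id, numvfs):
        """Illustrative stand-in for networkcard.set_sriov_numvfs()."""
        path = '/sys/bus/pci/devices/{}/sriov_numvfs'.format(pci_id)
        try:
            # reset to 0 first; the kernel rejects changes while VFs are active
            with open(path, 'w') as handle:
                handle.write('0')
            with open(path, 'w') as handle:
                handle.write(str(numvfs))
            return True
        except OSError:
            return False

    if not set_numvfs_sketch('0000:05:00.0', 2):
        raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(
            '0000:05:00.0'))
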
@@ -375,8 +512,7 @@ def disable_sriov(nic_list):
     for nic in nic_list:
         if networkcard.is_sriov_nic(nic):
             if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
-                _LOGGER.error("SRIOV cannot be disabled for NIC %s", nic)
-                raise
+                raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
             else:
                 _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])
 
@@ -406,23 +542,42 @@ def handle_list_options(args):
         print(Loader().get_pktfwds_printable())
         sys.exit(0)
 
+    if args['list_loadgens']:
+        print(Loader().get_loadgens_printable())
+        sys.exit(0)
+
     if args['list_settings']:
         print(str(settings))
         sys.exit(0)
 
     if args['list']:
-        # configure tests
-        if args['integration']:
-            testcases = settings.getValue('INTEGRATION_TESTS')
-        else:
-            testcases = settings.getValue('PERFORMANCE_TESTS')
+        list_testcases(args)
+        sys.exit(0)
 
-        print("Available Tests:")
-        print("================")
 
-        for test in testcases:
-            print('* %-30s %s' % ('%s:' % test['Name'], test['Description']))
-        sys.exit(0)
+def list_testcases(args):
+    """ Print list of testcases requested by --list CLI argument
+
+    :param args: A dictionary with all CLI arguments
+    """
+    # configure tests
+    if args['integration']:
+        testcases = settings.getValue('INTEGRATION_TESTS')
+    else:
+        testcases = settings.getValue('PERFORMANCE_TESTS')
+
+    print("Available Tests:")
+    print("================")
+
+    for test in testcases:
+        description = functions.format_description(test['Description'], 70)
+        if len(test['Name']) < 40:
+            print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
+        else:
+            print('* {}'.format('{}:'.format(test['Name'])))
+            print('  {:40} {}'.format('', description[0]))
+        for i in range(1, len(description)):
+            print('  {:40} {}'.format('', description[i]))
 
 
 def vsperf_finalize():
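
The new list_testcases() layout keeps each test name in a 40-character column
and continues wrapped description lines underneath it. Assuming
functions.format_description() wraps text like the textwrap module does (an
assumption; the real helper lives in tools/functions.py), the effect can be
reproduced as follows:

    import textwrap

    def print_entry(name, description):
        # mirrors the '* {:40} {}' alignment used in list_testcases()
        lines = textwrap.wrap(description, 70)
        if len(name) < 40:
            print('* {:40} {}'.format('{}:'.format(name), lines[0]))
        else:
            print('* {}:'.format(name))
            print('  {:40} {}'.format('', lines[0]))
        for cont in lines[1:]:
            print('  {:40} {}'.format('', cont))

    print_entry('phy2phy_tput',
                'Example description: measure RX throughput at a defined '
                'packet loss ratio for a physical-to-physical deployment.')
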
@@ -434,7 +589,7 @@ def vsperf_finalize():
         if os.path.exists(results_path):
             files_list = os.listdir(results_path)
             if files_list == []:
-                _LOGGER.info("Removing empty result directory: "  + results_path)
+                _LOGGER.info("Removing empty result directory: %s", results_path)
                 shutil.rmtree(results_path)
     except AttributeError:
         # skip it if parameter doesn't exist
@@ -476,7 +631,7 @@ class MockTestCase(unittest.TestCase):
         on how self.is_pass was set in the constructor"""
         self.assertTrue(self.is_pass, self.msg)
 
-
+# pylint: disable=too-many-locals, too-many-branches, too-many-statements
 def main():
     """Main function.
     """
@@ -506,8 +661,8 @@ def main():
 
     settings.setValue('mode', args['mode'])
 
-    # set dpdk and ovs paths according to VNF and VSWITCH
-    if settings.getValue('mode') != 'trafficgen':
+    # update paths to trafficgens if required
+    if settings.getValue('mode') == 'trafficgen':
         functions.settings_update_paths()
 
     # if required, handle list-* operations
@@ -529,7 +684,7 @@ def main():
 
     # configuration validity checks
     if args['vswitch']:
-        vswitch_none = 'none' == args['vswitch'].strip().lower()
+        vswitch_none = args['vswitch'].strip().lower() == 'none'
         if vswitch_none:
             settings.setValue('VSWITCH', 'none')
         else:
@@ -558,6 +713,14 @@ def main():
                           settings.getValue('VNF_DIR'))
             sys.exit(1)
 
+    if args['loadgen']:
+        loadgens = Loader().get_loadgens()
+        if args['loadgen'] not in loadgens:
+            _LOGGER.error('There are no loadgens matching \'%s\' found in'
+                          ' \'%s\'. Exiting...', args['loadgen'],
+                          settings.getValue('LOADGEN_DIR'))
+            sys.exit(1)
+
     if args['exact_test_name'] and args['tests']:
         _LOGGER.error("Cannot specify tests with both positional args and --test.")
         sys.exit(1)
@@ -579,9 +742,8 @@ def main():
                              'driver' : networkcard.get_driver(tmp_nic),
                              'device' : networkcard.get_device_name(tmp_nic)})
         else:
-            _LOGGER.error("Invalid network card PCI ID: '%s'", nic)
             vsperf_finalize()
-            raise
+            raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))
 
     settings.setValue('NICS', nic_list)
     # for backward compatibility
@@ -595,20 +757,16 @@ def main():
 
     # create results directory
     if not os.path.exists(results_path):
-        _LOGGER.info("Creating result directory: "  + results_path)
+        _LOGGER.info("Creating result directory: %s", results_path)
         os.makedirs(results_path)
-
+    # pylint: disable=too-many-nested-blocks
     if settings.getValue('mode') == 'trafficgen':
         # execute only traffic generator
         _LOGGER.debug("Executing traffic generator:")
         loader = Loader()
         # set traffic details, so they can be passed to traffic ctl
-        traffic = copy.deepcopy(TRAFFIC_DEFAULTS)
-        traffic.update({'traffic_type': get_test_param('traffic_type', TRAFFIC_DEFAULTS['traffic_type']),
-                        'bidir': get_test_param('bidirectional', TRAFFIC_DEFAULTS['bidir']),
-                        'multistream': int(get_test_param('multistream', TRAFFIC_DEFAULTS['multistream'])),
-                        'stream_type': get_test_param('stream_type', TRAFFIC_DEFAULTS['stream_type']),
-                        'frame_rate': int(get_test_param('iload', TRAFFIC_DEFAULTS['frame_rate']))})
+        traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
+        traffic = functions.check_traffic(traffic)
 
         traffic_ctl = component_factory.create_traffic(
             traffic['traffic_type'],
@@ -631,7 +789,11 @@ def main():
         if args['exact_test_name']:
             exact_names = args['exact_test_name']
             # positional args => exact matches only
-            selected_tests = [test for test in testcases if test['Name'] in exact_names]
+            selected_tests = []
+            for test_name in exact_names:
+                for test in testcases:
+                    if test['Name'] == test_name:
+                        selected_tests.append(test)
         elif args['tests']:
             # --tests => apply filter to select requested tests
             selected_tests = apply_filter(testcases, args['tests'])
@@ -639,31 +801,57 @@ def main():
             # Default - run all tests
             selected_tests = testcases
 
-        if not len(selected_tests):
+        if not selected_tests:
             _LOGGER.error("No tests matched --tests option or positional args. Done.")
             vsperf_finalize()
             sys.exit(1)
 
-        # run tests
         suite = unittest.TestSuite()
-        for cfg in selected_tests:
+        settings_snapshot = copy.deepcopy(settings.__dict__)
+
+        for i, cfg in enumerate(selected_tests):
+            settings.setValue('_TEST_INDEX', i)
             test_name = cfg.get('Name', '<Name not set>')
             try:
+                test_params = settings.getValue('_PARAMS_LIST')
+                if isinstance(test_params, list):
+                    list_index = i
+                    if i >= len(test_params):
+                        list_index = len(test_params) - 1
+                    test_params = test_params[list_index]
+                if settings.getValue('CUMULATIVE_PARAMS'):
+                    test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+                settings.setValue('TEST_PARAMS', test_params)
+
                 if args['integration']:
                     test = IntegrationTestCase(cfg)
                 else:
                     test = PerformanceTestCase(cfg)
+
                 test.run()
                 suite.addTest(MockTestCase('', True, test.name))
-            #pylint: disable=broad-except
+
+            # pylint: disable=broad-except
             except (Exception) as ex:
                 _LOGGER.exception("Failed to run test: %s", test_name)
                 suite.addTest(MockTestCase(str(ex), False, test_name))
                 _LOGGER.info("Continuing with next test...")
+            finally:
+                if not settings.getValue('CUMULATIVE_PARAMS'):
+                    settings.restore_from_dict(settings_snapshot)
+
+        settings.restore_from_dict(settings_snapshot)
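
When '_PARAMS_LIST' holds fewer override sets than there are selected tests,
the index is clamped so that trailing tests reuse the last set; with
CUMULATIVE_PARAMS enabled the sets are merged on top of each other instead of
replacing one another. The clamping rule in isolation:

    params_list = [{'TRAFFICGEN_DURATION': 10},
                   {'TRAFFICGEN_DURATION': 30}]

    for i in range(3):                             # three selected tests
        list_index = min(i, len(params_list) - 1)  # same effect as the clamp
        print(i, params_list[list_index])
    # 0 {'TRAFFICGEN_DURATION': 10}
    # 1 {'TRAFFICGEN_DURATION': 30}
    # 2 {'TRAFFICGEN_DURATION': 30}   <- last set reused for extra tests
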
+
+        # Generate and printout Performance Matrix
+        if args['matrix']:
+            generate_performance_matrix(selected_tests, results_path)
 
         # generate final rst report with results of all executed TCs
         generate_final_report()
 
         if settings.getValue('XUNIT'):
             xmlrunner.XMLTestRunner(
                 output=settings.getValue('XUNIT_DIR'), outsuffix="",
@@ -671,22 +859,24 @@ def main():
 
         if args['opnfvpod']:
             pod_name = args['opnfvpod']
-            installer_name = settings.getValue('OPNFV_INSTALLER')
+            installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
             opnfv_url = settings.getValue('OPNFV_URL')
             pkg_list = settings.getValue('PACKAGE_LIST')
 
-            int_data = {'vanilla': False,
-                        'pod': pod_name,
+            int_data = {'pod': pod_name,
+                        'build_tag': get_build_tag(),
                         'installer': installer_name,
                         'pkg_list': pkg_list,
-                        'db_url': opnfv_url}
-            if settings.getValue('VSWITCH').endswith('Vanilla'):
-                int_data['vanilla'] = True
-            opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
+                        'db_url': opnfv_url,
+                        # pass vswitch name from configuration to be used for failed
+                        # TCs; in case of successful TCs it is safer to use the vswitch
+                        # name from CSV as a TC can override the global configuration
+                        'vswitch': str(settings.getValue('VSWITCH')).lower()}
+            tc_names = [tc['Name'] for tc in selected_tests]
+            opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
 
     # cleanup before exit
     vsperf_finalize()
 
 if __name__ == "__main__":
     main()
-