Python: Upgrade Python version from 3.4 to 3.6
diff --git a/vsperf b/vsperf
index 36c12df..003ca88 100755 (executable)
--- a/vsperf
+++ b/vsperf
@@ -23,6 +23,7 @@ import sys
 import argparse
 import re
 import time
+import csv
 import datetime
 import shutil
 import unittest
@@ -32,6 +33,8 @@ import glob
 import subprocess
 import ast
 import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
 from conf import settings
 import core.component_factory as component_factory
 from core.loader import Loader
@@ -42,7 +45,6 @@ from tools import networkcard
 from tools import functions
 from tools.pkt_gen import trafficgen
 from tools.opnfvdashboard import opnfvdashboard
-
 sys.dont_write_bytecode = True
 
 VERBOSITY_LEVELS = {
@@ -61,40 +63,67 @@ _TEMPLATE_RST = {'head'  : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
                  'tmp'   : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
                 }
 
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+                   "The following performance matrix was generated with the results of all the\n"\
+                   "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
 
 _LOGGER = logging.getLogger()
 
+def parse_param_string(values):
+    """
+    Parse and split a single '--test-params' argument.
+
+    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ';' separated list,
+    e.g. --test-params 'x=z; y=(a,b)'
+    """
+    results = {}
+
+    if values == '':
+        return {}
+
+    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+        param = param.strip()
+        value = value.strip()
+        if param:
+            if value:
+                # values are passed inside string from CLI, so we must retype them accordingly
+                try:
+                    results[param] = ast.literal_eval(value)
+                except ValueError:
+                    # for backward compatibility, we have to accept strings without quotes
+                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+                                    param, str(value))
+                    results[param] = str(value)
+            else:
+                results[param] = True
+    return results
+
+
 def parse_arguments():
     """
     Parse command line arguments.
     """
     class _SplitTestParamsAction(argparse.Action):
         """
-        Parse and split the '--test-params' argument.
+        Parse and split '--test-params' arguments.
 
-        This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
-        values. For multiple overrides use a ; separated list for
+        This expects either a single ';' separated list of overrides
+        given as 'x=y', 'x=y,z' or 'x' (implicit true) values,
         e.g. --test-params 'x=z; y=(a,b)'
+        or a list of such ';' separated lists with overrides for
+        multiple tests,
+        e.g. --test-params "['x=z; y=(a,b)','x=z']"
         """
         def __call__(self, parser, namespace, values, option_string=None):
-            results = {}
-
-            for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
-                param = param.strip()
-                value = value.strip()
-                if len(param):
-                    if len(value):
-                        # values are passed inside string from CLI, so we must retype them accordingly
-                        try:
-                            results[param] = ast.literal_eval(value)
-                        except ValueError:
-                            # for backward compatibility, we have to accept strings without quotes
-                            _LOGGER.warning("Adding missing quotes around string value: %s = %s",
-                                            param, str(value))
-                            results[param] = str(value)
-                    else:
-                        results[param] = True
-
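+            # a value that starts with '[' is treated as a Python-style list of
+            # per-test override strings, e.g. "['x=z; y=(a,b)','x=z']"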
+            if values[0] == '[':
+                input_list = ast.literal_eval(values)
+                parameter_list = []
+                for test_params in input_list:
+                    parameter_list.append(parse_param_string(test_params))
+            else:
+                parameter_list = parse_param_string(values)
+            results = {'_PARAMS_LIST': parameter_list}
             setattr(namespace, self.dest, results)
 
     class _ValidateFileAction(argparse.Action):
@@ -126,7 +155,7 @@ def parse_arguments():
     def list_logging_levels():
         """Give a summary of all available logging levels.
 
-       :return: List of verbosity level names in decreasing order of
+        :return: List of verbosity level names in decreasing order of
             verbosity
         """
         return sorted(VERBOSITY_LEVELS.keys(),
@@ -147,6 +176,8 @@ def parse_arguments():
                         help='list all system forwarding applications and exit')
     parser.add_argument('--list-vnfs', action='store_true',
                         help='list all system vnfs and exit')
+    parser.add_argument('--list-loadgens', action='store_true',
+                        help='list all background load generators and exit')
     parser.add_argument('--list-settings', action='store_true',
                         help='list effective settings configuration and exit')
     parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -174,6 +205,7 @@ def parse_arguments():
     group.add_argument('--vswitch', help='vswitch implementation to use')
     group.add_argument('--fwdapp', help='packet forwarding application to use')
     group.add_argument('--vnf', help='vnf to use')
+    group.add_argument('--loadgen', help='loadgen to use')
     group.add_argument('--sysmetrics', help='system metrics logger to use')
     group = parser.add_argument_group('test behavior options')
     group.add_argument('--xunit', action='store_true',
@@ -186,9 +218,14 @@ def parse_arguments():
                        help='settings file')
     group.add_argument('--test-params', action=_SplitTestParamsAction,
                        help='csv list of test parameters: key=val; e.g. '
-                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
-                       'GUEST_LOOPBACK=["l2fwd"] ...')
+                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+                       'GUEST_LOOPBACK=["l2fwd"] ...'
+                       ' or a list of csv lists of test parameters: key=val; e.g. '
+                       '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+                       '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
     group.add_argument('--opnfvpod', help='name of POD in opnfv')
+    group.add_argument('--matrix', help='enable performance matrix analysis',
+                       action='store_true', default=False)
 
     args = vars(parser.parse_args())
 
@@ -198,13 +235,31 @@ def parse_arguments():
 def configure_logging(level):
     """Configure logging.
     """
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
+    rename_default = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
     log_file_default = os.path.join(
-        settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT'))
+        settings.getValue('RESULTS_PATH'), rename_default)
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+    rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
     log_file_host_cmds = os.path.join(
-        settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+        settings.getValue('RESULTS_PATH'), rename_hostcmd)
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+    rename_traffic = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
     log_file_traffic_gen = os.path.join(
-        settings.getValue('LOG_DIR'),
-        settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+        settings.getValue('RESULTS_PATH'), rename_traffic)
+    metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
+                    settings.getValue('LOG_TIMESTAMP') + '.log')
+    log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
+                                          metrics_file)
 
     _LOGGER.setLevel(logging.DEBUG)
 
@@ -216,6 +271,8 @@ def configure_logging(level):
 
     file_logger = logging.FileHandler(filename=log_file_default)
     file_logger.setLevel(logging.DEBUG)
+    file_logger.setFormatter(logging.Formatter(
+        '%(asctime)s : %(message)s'))
     _LOGGER.addHandler(file_logger)
 
     class CommandFilter(logging.Filter):
@@ -228,6 +285,11 @@ def configure_logging(level):
         def filter(self, record):
             return record.getMessage().startswith(trafficgen.CMD_PREFIX)
 
+    class CollectdMetricsFilter(logging.Filter):
+        """Filter out strings beginning with 'COLLECTD' :'"""
+        def filter(self, record):
+            return record.getMessage().startswith('COLLECTD')
+
     cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
     cmd_logger.setLevel(logging.DEBUG)
     cmd_logger.addFilter(CommandFilter())
@@ -238,6 +300,12 @@ def configure_logging(level):
     gen_logger.addFilter(TrafficGenCommandFilter())
     _LOGGER.addHandler(gen_logger)
 
+    if settings.getValue('COLLECTOR') == 'Collectd':
+        met_logger = logging.FileHandler(filename=log_file_infra_metrics)
+        met_logger.setLevel(logging.DEBUG)
+        met_logger.addFilter(CollectdMetricsFilter())
+        _LOGGER.addHandler(met_logger)
+
 
 def apply_filter(tests, tc_filter):
     """Allow a subset of tests to be conveniently selected
@@ -289,7 +357,7 @@ def get_vswitch_names(rst_files):
     """ Function will return a list of vSwitches detected in given ``rst_files``.
     """
     vswitch_names = set()
-    if len(rst_files):
+    if rst_files:
         try:
             output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
             for line in output:
@@ -297,7 +365,7 @@ def get_vswitch_names(rst_files):
                 if match:
                     vswitch_names.add(match.group(1))
 
-            if len(vswitch_names):
+            if vswitch_names:
                 return list(vswitch_names)
 
         except subprocess.CalledProcessError:
@@ -328,7 +396,7 @@ def generate_final_report():
     # check if there are any results in rst format
     rst_results = glob.glob(os.path.join(path, 'result*rst'))
     pkt_processors = get_vswitch_names(rst_results)
-    if len(rst_results):
+    if rst_results:
         try:
             test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
             # create report caption directly - it is not worth to execute jinja machinery
@@ -356,6 +424,69 @@ def generate_final_report():
             _LOGGER.error('Generation of overall test report has failed.')
 
 
+def generate_performance_matrix(selected_tests, results_path):
+    """
+    Loads the results of all the currently run tests, compares them
+    based on the MATRIX_METRIC, outputs and saves the generated table.
+    :param selected_tests: list of the currently run tests
+    :param results_path: directory path to the results of the current tests
+    """
+    _LOGGER.info('Performance Matrix:')
+    test_list = []
+
+    for test in selected_tests:
+        test_name = test.get('Name', '<Name not set>')
+        test_deployment = test.get('Deployment', '<Deployment not set>')
+        test_list.append({'test_name': test_name,
+                          'test_deployment': test_deployment,
+                          'csv_data': False})
+
+    test_params = {}
+    output = []
+    all_params = settings.getValue('_PARAMS_LIST')
+    for i in range(len(selected_tests)):
+        test = test_list[i]
+        if isinstance(all_params, list):
+            list_index = i
+            if i >= len(all_params):
+                list_index = len(all_params) - 1
+            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+                test_params.update(all_params[list_index])
+            else:
+                test_params = all_params[list_index]
+        else:
+            test_params = all_params
+        settings.setValue('TEST_PARAMS', test_params)
+        test['test_params'] = copy.deepcopy(test_params)
+        try:
+            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+                                                      test['test_name'], test['test_deployment'])) as csvfile:
+                reader = list(csv.DictReader(csvfile))
+                test['csv_data'] = reader[0]
+        # pylint: disable=broad-except
+        except (Exception) as ex:
+            _LOGGER.error("Result file not found: %s", ex)
+
+    metric = settings.getValue('MATRIX_METRIC')
+    change = {}
+    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+        _LOGGER.error("Incorrect format of test results")
+        return
+    for i, test in enumerate(test_list):
+        if test['csv_data']:
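+            # metric value expressed as percentage change relative to the first
+            # test in the list, which serves as the baseline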
+            change[i] = float(test['csv_data'][metric])/\
+                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
+            output.append([i, test['test_name'], float(test['csv_data'][metric]),
+                           change[i], str(test['test_params'])[1:-1]])
+        else:
+            change[i] = 0
+            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+                                                                   tablefmt="rst", floatfmt="0.3f")))
+        _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
 def enable_sriov(nic_list):
     """ Enable SRIOV for given enhanced PCI IDs
 
@@ -373,7 +504,7 @@ def enable_sriov(nic_list):
                 sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
 
     # sriov is required for some NICs
-    if len(sriov_nic):
+    if sriov_nic:
         for nic in sriov_nic:
             # check if SRIOV is supported and enough virt interfaces are available
             if not networkcard.is_sriov_supported(nic) \
@@ -384,6 +515,9 @@ def enable_sriov(nic_list):
                 else:
                     _LOGGER.debug("SRIOV enabled for NIC %s", nic)
 
+                # ensure that path to the bind tool is valid
+                functions.settings_update_paths()
+
                 # WORKAROUND: it has been observed with IXGBE(VF) driver,
                 # that NIC doesn't correctly dispatch traffic to VFs based
                 # on their MAC address. Unbind and bind to the same driver
@@ -439,23 +573,42 @@ def handle_list_options(args):
         print(Loader().get_pktfwds_printable())
         sys.exit(0)
 
+    if args['list_loadgens']:
+        print(Loader().get_loadgens_printable())
+        sys.exit(0)
+
     if args['list_settings']:
         print(str(settings))
         sys.exit(0)
 
     if args['list']:
-        # configure tests
-        if args['integration']:
-            testcases = settings.getValue('INTEGRATION_TESTS')
-        else:
-            testcases = settings.getValue('PERFORMANCE_TESTS')
+        list_testcases(args)
+        sys.exit(0)
 
-        print("Available Tests:")
-        print("================")
 
-        for test in testcases:
-            print('* %-30s %s' % ('%s:' % test['Name'], test['Description']))
-        sys.exit(0)
+def list_testcases(args):
+    """ Print list of testcases requested by --list CLI argument
+
+    :param args: A dictionary with all CLI arguments
+    """
+    # configure tests
+    if args['integration']:
+        testcases = settings.getValue('INTEGRATION_TESTS')
+    else:
+        testcases = settings.getValue('PERFORMANCE_TESTS')
+
+    print("Available Tests:")
+    print("================")
+
+    for test in testcases:
+        description = functions.format_description(test['Description'], 70)
+        if len(test['Name']) < 40:
+            print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
+        else:
+            print('* {}'.format('{}:'.format(test['Name'])))
+            print('  {:40} {}'.format('', description[0]))
+        for i in range(1, len(description)):
+            print('  {:40} {}'.format('', description[i]))
 
 
 def vsperf_finalize():
@@ -467,7 +620,7 @@ def vsperf_finalize():
         if os.path.exists(results_path):
             files_list = os.listdir(results_path)
             if files_list == []:
-                _LOGGER.info("Removing empty result directory: "  + results_path)
+                _LOGGER.info("Removing empty result directory: %s", results_path)
                 shutil.rmtree(results_path)
     except AttributeError:
         # skip it if parameter doesn't exist
@@ -519,6 +672,11 @@ def main():
 
     settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
 
+    # Define the timestamp to be used by logs and results
+    date = datetime.datetime.fromtimestamp(time.time())
+    timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+    settings.setValue('LOG_TIMESTAMP', timestamp)
+
     # Load non performance/integration tests
     if args['integration']:
         settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
@@ -539,15 +697,27 @@ def main():
 
     settings.setValue('mode', args['mode'])
 
-    # set dpdk and ovs paths according to VNF and VSWITCH
-    if settings.getValue('mode') != 'trafficgen':
+    # update paths to trafficgens if required
+    if settings.getValue('mode') == 'trafficgen':
         functions.settings_update_paths()
 
     # if required, handle list-* operations
     handle_list_options(args)
 
+    # generate results directory name
+    results_dir = "results_" + timestamp
+    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+    settings.setValue('RESULTS_PATH', results_path)
+
+    # create results directory
+    if not os.path.exists(results_path):
+        os.makedirs(results_path)
+
     configure_logging(settings.getValue('VERBOSITY'))
 
+    # CI build support
+    _LOGGER.info("Creating result directory: %s", results_path)
+
     # check and fix locale
     check_and_set_locale()
 
@@ -591,6 +761,14 @@ def main():
                           settings.getValue('VNF_DIR'))
             sys.exit(1)
 
+    if args['loadgen']:
+        loadgens = Loader().get_loadgens()
+        if args['loadgen'] not in loadgens:
+            _LOGGER.error('There are no loadgens matching \'%s\' found in'
+                          ' \'%s\'. Exiting...', args['loadgen'],
+                          settings.getValue('LOADGEN_DIR'))
+            sys.exit(1)
+
     if args['exact_test_name'] and args['tests']:
         _LOGGER.error("Cannot specify tests with both positional args and --test.")
         sys.exit(1)
@@ -619,24 +797,14 @@ def main():
     # for backward compatibility
     settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
 
-    # generate results directory name
-    date = datetime.datetime.fromtimestamp(time.time())
-    results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
-    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
-    settings.setValue('RESULTS_PATH', results_path)
-
-    # create results directory
-    if not os.path.exists(results_path):
-        _LOGGER.info("Creating result directory: "  + results_path)
-        os.makedirs(results_path)
 
+    # pylint: disable=too-many-nested-blocks
     if settings.getValue('mode') == 'trafficgen':
         # execute only traffic generator
         _LOGGER.debug("Executing traffic generator:")
         loader = Loader()
         # set traffic details, so they can be passed to traffic ctl
         traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
-
         traffic = functions.check_traffic(traffic)
 
         traffic_ctl = component_factory.create_traffic(
@@ -660,7 +828,11 @@ def main():
         if args['exact_test_name']:
             exact_names = args['exact_test_name']
             # positional args => exact matches only
-            selected_tests = [test for test in testcases if test['Name'] in exact_names]
+            selected_tests = []
+            for test_name in exact_names:
+                for test in testcases:
+                    if test['Name'] == test_name:
+                        selected_tests.append(test)
         elif args['tests']:
             # --tests => apply filter to select requested tests
             selected_tests = apply_filter(testcases, args['tests'])
@@ -668,34 +840,57 @@ def main():
             # Default - run all tests
             selected_tests = testcases
 
-        if not len(selected_tests):
+        if not selected_tests:
             _LOGGER.error("No tests matched --tests option or positional args. Done.")
             vsperf_finalize()
             sys.exit(1)
 
-        # run tests
-        # Add pylint exception: Redefinition of test type from
-        # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
-        # pylint: disable=redefined-variable-type
         suite = unittest.TestSuite()
-        for cfg in selected_tests:
+        settings_snapshot = copy.deepcopy(settings.__dict__)
+
+        for i, cfg in enumerate(selected_tests):
+            settings.setValue('_TEST_INDEX', i)
             test_name = cfg.get('Name', '<Name not set>')
             try:
+                test_params = settings.getValue('_PARAMS_LIST')
+                if isinstance(test_params, list):
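+                    # if fewer parameter sets than tests were given, the last
+                    # set is reused for the remaining tests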
+                    list_index = i
+                    if i >= len(test_params):
+                        list_index = len(test_params) - 1
+                    test_params = test_params[list_index]
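+                # with CUMULATIVE_PARAMS enabled, each test inherits and extends
+                # the overrides applied to the previous test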
+                if settings.getValue('CUMULATIVE_PARAMS'):
+                    test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+                settings.setValue('TEST_PARAMS', test_params)
+
                 if args['integration']:
                     test = IntegrationTestCase(cfg)
                 else:
                     test = PerformanceTestCase(cfg)
+
                 test.run()
                 suite.addTest(MockTestCase('', True, test.name))
+
             # pylint: disable=broad-except
             except (Exception) as ex:
                 _LOGGER.exception("Failed to run test: %s", test_name)
                 suite.addTest(MockTestCase(str(ex), False, test_name))
                 _LOGGER.info("Continuing with next test...")
+            finally:
+                if not settings.getValue('CUMULATIVE_PARAMS'):
+                    settings.restore_from_dict(settings_snapshot)
+
+        settings.restore_from_dict(settings_snapshot)
+
+        # Generate and print out the Performance Matrix
+        if args['matrix']:
+            generate_performance_matrix(selected_tests, results_path)
 
         # generate final rst report with results of all executed TCs
         generate_final_report()
 
         if settings.getValue('XUNIT'):
             xmlrunner.XMLTestRunner(
                 output=settings.getValue('XUNIT_DIR'), outsuffix="",
@@ -708,12 +903,16 @@ def main():
             pkg_list = settings.getValue('PACKAGE_LIST')
 
             int_data = {'pod': pod_name,
-                        'criteria': "PASS",
                         'build_tag': get_build_tag(),
                         'installer': installer_name,
                         'pkg_list': pkg_list,
-                        'db_url': opnfv_url}
-            opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
+                        'db_url': opnfv_url,
+                        # pass vswitch name from configuration to be used for failed
+                        # TCs; In case of successful TCs it is safer to use vswitch
+                        # name from CSV as TC can override global configuration
+                        'vswitch': str(settings.getValue('VSWITCH')).lower()}
+            tc_names = [tc['Name'] for tc in selected_tests]
+            opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
 
     # cleanup before exit
     vsperf_finalize()