Logs: Move test-specific logs to test-specific results folder
diff --git a/vsperf b/vsperf
index 4b1d86d..003ca88 100755
--- a/vsperf
+++ b/vsperf
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,27 +21,31 @@ import logging
 import os
 import sys
 import argparse
+import re
 import time
+import csv
 import datetime
 import shutil
 import unittest
-import xmlrunner
 import locale
 import copy
 import glob
 import subprocess
-
-sys.dont_write_bytecode = True
-
+import ast
+import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
 from conf import settings
-from conf import get_test_param
+import core.component_factory as component_factory
 from core.loader import Loader
-from testcases import TestCase
+from testcases import PerformanceTestCase
+from testcases import IntegrationTestCase
 from tools import tasks
+from tools import networkcard
+from tools import functions
 from tools.pkt_gen import trafficgen
 from tools.opnfvdashboard import opnfvdashboard
-from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
-import core.component_factory as component_factory
+sys.dont_write_bytecode = True
 
 VERBOSITY_LEVELS = {
     'debug': logging.DEBUG,
@@ -51,37 +55,75 @@ VERBOSITY_LEVELS = {
     'critical': logging.CRITICAL
 }
 
-_TEMPLATE_RST = {'head'  : 'tools/report/report_head.rst',
-                 'foot'  : 'tools/report/report_foot.rst',
-                 'final' : 'test_report.rst'
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
+
+_TEMPLATE_RST = {'head'  : os.path.join(_CURR_DIR, 'tools/report/report_head.rst'),
+                 'foot'  : os.path.join(_CURR_DIR, 'tools/report/report_foot.rst'),
+                 'final' : 'test_report.rst',
+                 'tmp'   : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
                 }
 
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+                   "The following performance matrix was generated with the results of all the\n"\
+                   "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
+
+_LOGGER = logging.getLogger()
+
+def parse_param_string(values):
+    """
+    Parse and split a single '--test-params' argument.
+
+    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ';' separated list,
+    e.g. --test-params 'x=z; y=(a,b)'
+    """
+    results = {}
+
+    if values == '':
+        return {}
+
+    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+        param = param.strip()
+        value = value.strip()
+        if param:
+            if value:
+                # values are passed inside string from CLI, so we must retype them accordingly
+                try:
+                    results[param] = ast.literal_eval(value)
+                except ValueError:
+                    # for backward compatibility, we have to accept strings without quotes
+                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+                                    param, str(value))
+                    results[param] = str(value)
+            else:
+                results[param] = True
+    return results
+
+
 def parse_arguments():
     """
     Parse command line arguments.
     """
     class _SplitTestParamsAction(argparse.Action):
         """
-        Parse and split the '--test-params' argument.
-
-        This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
-        values. For multiple overrides use a ; separated list for
-        e.g. --test-params 'x=z; y=a,b'
+        Parse and split '--test-params' arguments.
+
+        This expects either a single ';' separated list of overrides
+        given as 'x=y', 'x=y,z' or 'x' (implicit true) values,
+        e.g. --test-params 'x=z; y=(a,b)'
+        or a list of these ';' separated lists with overrides for
+        multiple tests,
+        e.g. --test-params "['x=z; y=(a,b)','x=z']"
         """
         def __call__(self, parser, namespace, values, option_string=None):
         """
         def __call__(self, parser, namespace, values, option_string=None):
-            results = {}
-
-            for value in values.split(';'):
-                result = [key.strip() for key in value.split('=')]
-                if len(result) == 1:
-                    results[result[0]] = True
-                elif len(result) == 2:
-                    results[result[0]] = result[1]
-                else:
-                    raise argparse.ArgumentTypeError(
-                        'expected \'%s\' to be of format \'key=val\' or'
-                        ' \'key\'' % result)
-
+            if values[0] == '[':
+                input_list = ast.literal_eval(values)
+                parameter_list = []
+                for test_params in input_list:
+                    parameter_list.append(parse_param_string(test_params))
+            else:
+                parameter_list = parse_param_string(values)
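+            # a plain string yields a single dict of overrides, while the
+            # "['...','...']" form yields one dict per test; e.g. (illustrative)
+            # "['x=1','x=2']" -> {'_PARAMS_LIST': [{'x': 1}, {'x': 2}]}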
+            results = {'_PARAMS_LIST':parameter_list}
             setattr(namespace, self.dest, results)
 
     class _ValidateFileAction(argparse.Action):
@@ -113,7 +155,7 @@ def parse_arguments():
     def list_logging_levels():
         """Give a summary of all available logging levels.
 
-       :return: List of verbosity level names in decreasing order of
+        :return: List of verbosity level names in decreasing order of
             verbosity
         """
         return sorted(VERBOSITY_LEVELS.keys(),
@@ -134,6 +176,8 @@ def parse_arguments():
                         help='list all system forwarding applications and exit')
     parser.add_argument('--list-vnfs', action='store_true',
                         help='list all system vnfs and exit')
+    parser.add_argument('--list-loadgens', action='store_true',
+                        help='list all background load generators')
     parser.add_argument('--list-settings', action='store_true',
                         help='list effective settings configuration and exit')
     parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -145,20 +189,23 @@ def parse_arguments():
     group.add_argument('-m', '--mode', help='vsperf mode of operation;\
             Values: "normal" - execute vSwitch, VNF and traffic generator;\
             "trafficgen" - execute only traffic generator; "trafficgen-off" \
-            - execute vSwitch and VNF', default='normal')
+            - execute vSwitch and VNF; trafficgen-pause - execute vSwitch \
+            and VNF but pause before traffic transmission ', default='normal')
 
     group.add_argument('-f', '--test-spec', help='test specification file')
     group.add_argument('-d', '--test-dir', help='directory containing tests')
     group.add_argument('-t', '--tests', help='Comma-separated list of terms \
             indicating tests to run. e.g. "RFC2544,!p2p" - run all tests whose\
-            name contains RFC2544 less those containing "p2p"')
+            name contains RFC2544 less those containing "p2p"; "!back2back" - \
+            run all tests except those containing back2back')
     group.add_argument('--verbosity', choices=list_logging_levels(),
                        help='debug level')
+    group.add_argument('--integration', action='store_true', help='execute integration tests')
     group.add_argument('--trafficgen', help='traffic generator to use')
     group.add_argument('--vswitch', help='vswitch implementation to use')
     group.add_argument('--fwdapp', help='packet forwarding application to use')
     group.add_argument('--vnf', help='vnf to use')
-    group.add_argument('--duration', help='traffic transmit duration')
+    group.add_argument('--loadgen', help='loadgen to use')
     group.add_argument('--sysmetrics', help='system metrics logger to use')
     group = parser.add_argument_group('test behavior options')
     group.add_argument('--xunit', action='store_true',
@@ -170,10 +217,15 @@ def parse_arguments():
     group.add_argument('--conf-file', action=_ValidateFileAction,
                        help='settings file')
     group.add_argument('--test-params', action=_SplitTestParamsAction,
-                       help='csv list of test parameters: key=val; e.g.'
-                       'including pkt_sizes=x,y; duration=x; '
-                       'rfc2544_trials=x ...')
+                       help='csv list of test parameters: key=val; e.g. '
+                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+                       'GUEST_LOOPBACK=["l2fwd"] ...'
+                       ' or a list of csv lists of test parameters: key=val; e.g. '
+                       '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+                       '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
     group.add_argument('--opnfvpod', help='name of POD in opnfv')
+    group.add_argument('--matrix', help='enable performance matrix analysis',
+                       action='store_true', default=False)
 
     args = vars(parser.parse_args())
 
@@ -183,26 +235,45 @@ def parse_arguments():
 def configure_logging(level):
     """Configure logging.
     """
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
+    rename_default = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
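+    # e.g. a default log file 'vsperf.log' (illustrative name) is written as
+    # '<RESULTS_PATH>/vsperf_<LOG_TIMESTAMP>.log'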
     log_file_default = os.path.join(
-        settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT'))
+        settings.getValue('RESULTS_PATH'), rename_default)
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+    rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
     log_file_host_cmds = os.path.join(
-        settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+        settings.getValue('RESULTS_PATH'), rename_hostcmd)
+    name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+    rename_traffic = "{name}_{uid}{ex}".format(name=name,
+                                               uid=settings.getValue(
+                                                   'LOG_TIMESTAMP'),
+                                               ex=ext)
     log_file_traffic_gen = os.path.join(
-        settings.getValue('LOG_DIR'),
-        settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+        settings.getValue('RESULTS_PATH'), rename_traffic)
+    metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
+                    settings.getValue('LOG_TIMESTAMP') + '.log')
+    log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
+                                          metrics_file)
 
-    logger = logging.getLogger()
-    logger.setLevel(logging.DEBUG)
+    _LOGGER.setLevel(logging.DEBUG)
 
     stream_logger = logging.StreamHandler(sys.stdout)
     stream_logger.setLevel(VERBOSITY_LEVELS[level])
     stream_logger.setFormatter(logging.Formatter(
-        '[%(levelname)s]  %(asctime)s : (%(name)s) - %(message)s'))
-    logger.addHandler(stream_logger)
+        '[%(levelname)-5s]  %(asctime)s : (%(name)s) - %(message)s'))
+    _LOGGER.addHandler(stream_logger)
 
     file_logger = logging.FileHandler(filename=log_file_default)
     file_logger.setLevel(logging.DEBUG)
-    logger.addHandler(file_logger)
+    file_logger.setFormatter(logging.Formatter(
+        '%(asctime)s : %(message)s'))
+    _LOGGER.addHandler(file_logger)
 
     class CommandFilter(logging.Filter):
         """Filter out strings beginning with 'cmd :'"""
@@ -214,15 +285,26 @@ def configure_logging(level):
         def filter(self, record):
             return record.getMessage().startswith(trafficgen.CMD_PREFIX)
 
+    class CollectdMetricsFilter(logging.Filter):
+        """Filter out strings beginning with 'COLLECTD' :'"""
+        def filter(self, record):
+            return record.getMessage().startswith('COLLECTD')
+
     cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
     cmd_logger.setLevel(logging.DEBUG)
     cmd_logger.addFilter(CommandFilter())
-    logger.addHandler(cmd_logger)
+    _LOGGER.addHandler(cmd_logger)
 
     gen_logger = logging.FileHandler(filename=log_file_traffic_gen)
     gen_logger.setLevel(logging.DEBUG)
     gen_logger.addFilter(TrafficGenCommandFilter())
-    logger.addHandler(gen_logger)
+    _LOGGER.addHandler(gen_logger)
+
+    if settings.getValue('COLLECTOR') == 'Collectd':
+        met_logger = logging.FileHandler(filename=log_file_infra_metrics)
+        met_logger.setLevel(logging.DEBUG)
+        met_logger.addFilter(CollectdMetricsFilter())
+        _LOGGER.addHandler(met_logger)
 
 
 def apply_filter(tests, tc_filter):
@@ -239,7 +321,11 @@ def apply_filter(tests, tc_filter):
         e.g. '' - empty string selects all tests.
     :return: A list of the selected Tests.
     """
-    result = []
+    # if the first filter term is negative, we have to start with the full list of tests
+    if tc_filter and tc_filter.strip().startswith('!'):
+        result = tests
+    else:
+        result = []
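+    # filter terms are applied left to right; e.g. 'rfc2544,!p2p' first keeps
+    # tests whose name contains 'rfc2544', then drops those containing 'p2p'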
     if tc_filter is None:
         tc_filter = ""
 
@@ -247,11 +333,11 @@ def apply_filter(tests, tc_filter):
         if not term or term[0] != '!':
             # Add matching tests from 'tests' into results
             result.extend([test for test in tests \
-                if test.name.lower().find(term) >= 0])
+                if test['Name'].lower().find(term) >= 0])
         else:
             # Term begins with '!' so we remove matching tests
             result = [test for test in result \
-                if test.name.lower().find(term[1:]) < 0]
+                if test['Name'].lower().find(term[1:]) < 0]
 
     return result
 
@@ -264,28 +350,289 @@ def check_and_set_locale():
     system_locale = locale.getdefaultlocale()
     if None in system_locale:
         os.environ['LC_ALL'] = settings.getValue('DEFAULT_LOCALE')
-        logging.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
+        _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                         system_locale, locale.getdefaultlocale())
 
+def get_vswitch_names(rst_files):
+    """ Function will return a list of vSwitches detected in given ``rst_files``.
+    """
+    vswitch_names = set()
+    if rst_files:
+        try:
+            output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
+            for line in output:
+                match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
+                if match:
+                    vswitch_names.add(match.group(1))
+
+            if vswitch_names:
+                return list(vswitch_names)
+
+        except subprocess.CalledProcessError:
+            _LOGGER.warning('Cannot detect vSwitches used during testing.')
+
+    # fallback to the default value
+    return ['vSwitch']
+
+def get_build_tag():
+    """ Function will return a Jenkins job ID environment variable.
+    """
+
+    try:
+        build_tag = os.environ['BUILD_TAG']
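+        # Jenkins exports BUILD_TAG as 'jenkins-<job name>-<build number>'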
+
+    except KeyError:
+        _LOGGER.warning('Cannot detect Jenkins job ID')
+        build_tag = "none"
+
+    return build_tag
 
-def generate_final_report(path):
+def generate_final_report():
     """ Function will check if partial test results are available
     and generates final report in rst format.
     """
 
     """ Function will check if partial test results are available
     and generates final report in rst format.
     """
 
+    path = settings.getValue('RESULTS_PATH')
     # check if there are any results in rst format
     rst_results = glob.glob(os.path.join(path, 'result*rst'))
-    if len(rst_results):
+    pkt_processors = get_vswitch_names(rst_results)
+    if rst_results:
         try:
-            test_report = os.path.join(path, _TEMPLATE_RST['final'])
-            retval = subprocess.call('cat {} {} {} > {}'.format(_TEMPLATE_RST['head'], ' '.join(rst_results),
-                                                                _TEMPLATE_RST['foot'], test_report), shell=True)
+            test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
+            # create the report caption directly - it is not worth invoking the jinja machinery
+            report_caption = '{}\n{} {}\n{}\n\n'.format(
+                '============================================================',
+                'Performance report for',
+                ', '.join(pkt_processors),
+                '============================================================')
+
+            with open(_TEMPLATE_RST['tmp'], 'w') as file_:
+                file_.write(report_caption)
+
+            retval = subprocess.call('cat {} {} {} {} > {}'.format(_TEMPLATE_RST['tmp'], _TEMPLATE_RST['head'],
+                                                                   ' '.join(rst_results), _TEMPLATE_RST['foot'],
+                                                                   test_report), shell=True)
             if retval == 0 and os.path.isfile(test_report):
-                logging.info('Overall test report written to "%s"', test_report)
+                _LOGGER.info('Overall test report written to "%s"', test_report)
             else:
-                logging.error('Generatrion of overall test report has failed.')
+                _LOGGER.error('Generation of overall test report has failed.')
+
+            # remove temporary file
+            os.remove(_TEMPLATE_RST['tmp'])
+
         except subprocess.CalledProcessError:
-            logging.error('Generatrion of overall test report has failed.')
+            _LOGGER.error('Generation of overall test report has failed.')
+
+
+def generate_performance_matrix(selected_tests, results_path):
+    """
+    Loads the results of all the currently run tests, compares them
+    based on the MATRIX_METRIC, outputs and saves the generated table.
+    :selected_tests: list of currently run tests
+    :results_path: directory path to the results of current tests
+    """
+    _LOGGER.info('Performance Matrix:')
+    test_list = []
+
+    for test in selected_tests:
+        test_name = test.get('Name', '<Name not set>')
+        test_deployment = test.get('Deployment', '<Deployment not set>')
+        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+    test_params = {}
+    output = []
+    all_params = settings.getValue('_PARAMS_LIST')
+    for i in range(len(selected_tests)):
+        test = test_list[i]
+        if isinstance(all_params, list):
+            list_index = i
+            if i >= len(all_params):
+                list_index = len(all_params) - 1
+            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+                test_params.update(all_params[list_index])
+            else:
+                test_params = all_params[list_index]
+        else:
+            test_params = all_params
+        settings.setValue('TEST_PARAMS', test_params)
+        test['test_params'] = copy.deepcopy(test_params)
+        try:
+            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+                                                      test['test_name'], test['test_deployment'])) as csvfile:
+                reader = list(csv.DictReader(csvfile))
+                test['csv_data'] = reader[0]
+        # pylint: disable=broad-except
+        except (Exception) as ex:
+            _LOGGER.error("Result file not found: %s", ex)
+
+    metric = settings.getValue('MATRIX_METRIC')
+    change = {}
+    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+        _LOGGER.error("Incorrect format of test results")
+        return
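+    # the relative change is computed against the first test, i.e.
+    #   change[i] = metric_i / (metric_0 / 100) - 100
+    # which is the percentage difference of each run from test 0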
+    for i, test in enumerate(test_list):
+        if test['csv_data']:
+            change[i] = float(test['csv_data'][metric])/\
+                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
+            output.append([i, test['test_name'], float(test['csv_data'][metric]),
+                           change[i], str(test['test_params'])[1:-1]])
+        else:
+            change[i] = 0
+            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+                                                                   tablefmt="rst", floatfmt="0.3f")))
+        _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
+def enable_sriov(nic_list):
+    """ Enable SRIOV for given enhanced PCI IDs
+
+    :param nic_list: A list of enhanced PCI IDs
+    """
+    # detect if sriov is required
+    sriov_nic = {}
+    for nic in nic_list:
+        if networkcard.is_sriov_nic(nic):
+            tmp_nic = nic.split('|')
+            if tmp_nic[0] in sriov_nic:
+                if int(tmp_nic[1][2:]) > sriov_nic[tmp_nic[0]]:
+                    sriov_nic[tmp_nic[0]] = int(tmp_nic[1][2:])
+            else:
+                sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
+
+    # sriov is required for some NICs
+    if sriov_nic:
+        for nic in sriov_nic:
+            # check if SRIOV is supported and enough virt interfaces are available
+            if not networkcard.is_sriov_supported(nic) \
+                or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
+                # if not, enable and set appropriate number of VFs
+                if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
+                    raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
+                else:
+                    _LOGGER.debug("SRIOV enabled for NIC %s", nic)
+
+                # ensure that path to the bind tool is valid
+                functions.settings_update_paths()
+
+                # WORKAROUND: it has been observed with the IXGBE(VF) driver
+                # that the NIC doesn't correctly dispatch traffic to VFs based
+                # on their MAC address. Unbinding and rebinding to the same
+                # driver solves this issue.
+                networkcard.reinit_vfs(nic)
+
+        # After SRIOV is enabled it takes some time until network drivers
+        # properly initialize all cards.
+        # Wait even if SRIOV was already configured, as it may have been
+        # configured automatically just before vsperf execution.
+        time.sleep(2)
+
+        return True
+
+    return False
+
+
+def disable_sriov(nic_list):
+    """ Disable SRIOV for given PCI IDs
+
+    :param nic_list: A list of enhanced PCI IDs
+    """
+    for nic in nic_list:
+        if networkcard.is_sriov_nic(nic):
+            if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
+                raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
+            else:
+                _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])
+
+
+def handle_list_options(args):
+    """ Process --list cli arguments if needed
+
+    :param args: A dictionary with all CLI arguments
+    """
+    if args['list_trafficgens']:
+        print(Loader().get_trafficgens_printable())
+        sys.exit(0)
+
+    if args['list_collectors']:
+        print(Loader().get_collectors_printable())
+        sys.exit(0)
+
+    if args['list_vswitches']:
+        print(Loader().get_vswitches_printable())
+        sys.exit(0)
+
+    if args['list_vnfs']:
+        print(Loader().get_vnfs_printable())
+        sys.exit(0)
+
+    if args['list_fwdapps']:
+        print(Loader().get_pktfwds_printable())
+        sys.exit(0)
+
+    if args['list_loadgens']:
+        print(Loader().get_loadgens_printable())
+        sys.exit(0)
+
+    if args['list_settings']:
+        print(str(settings))
+        sys.exit(0)
+
+    if args['list']:
+        list_testcases(args)
+        sys.exit(0)
+
+
+def list_testcases(args):
+    """ Print list of testcases requested by --list CLI argument
+
+    :param args: A dictionary with all CLI arguments
+    """
+    # configure tests
+    if args['integration']:
+        testcases = settings.getValue('INTEGRATION_TESTS')
+    else:
+        testcases = settings.getValue('PERFORMANCE_TESTS')
+
+    print("Available Tests:")
+    print("================")
+
+    for test in testcases:
+        description = functions.format_description(test['Description'], 70)
+        if len(test['Name']) < 40:
+            print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
+        else:
+            print('* {}'.format('{}:'.format(test['Name'])))
+            print('  {:40} {}'.format('', description[0]))
+        for i in range(1, len(description)):
+            print('  {:40} {}'.format('', description[i]))
+
+
+def vsperf_finalize():
+    """ Clean up before exit
+    """
+    # remove directory if no result files were created
+    try:
+        results_path = settings.getValue('RESULTS_PATH')
+        if os.path.exists(results_path):
+            files_list = os.listdir(results_path)
+            if files_list == []:
+                _LOGGER.info("Removing empty result directory: %s", results_path)
+                shutil.rmtree(results_path)
+    except AttributeError:
+        # skip it if parameter doesn't exist
+        pass
+
+    # disable SRIOV if needed
+    try:
+        if settings.getValue('SRIOV_ENABLED'):
+            disable_sriov(settings.getValue('WHITELIST_NICS_ORIG'))
+    except AttributeError:
+        # skip it if parameter doesn't exist
+        pass
 
 
 class MockTestCase(unittest.TestCase):
@@ -315,7 +662,7 @@ class MockTestCase(unittest.TestCase):
         on how self.is_pass was set in the constructor"""
         self.assertTrue(self.is_pass, self.msg)
 
-
+# pylint: disable=too-many-locals, too-many-branches, too-many-statements
 def main():
     """Main function.
     """
@@ -323,7 +670,16 @@ def main():
 
     # configure settings
 
-    settings.load_from_dir('conf')
+    settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
+
+    # Define the timestamp to be used by logs and results
+    date = datetime.datetime.fromtimestamp(time.time())
+    timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+    settings.setValue('LOG_TIMESTAMP', timestamp)
+
+    # Load non performance/integration tests
+    if args['integration']:
+        settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
 
     # load command line parameters first in case there are settings files
     # to be used
@@ -339,29 +695,28 @@ def main():
     # than both a settings file and environment variables
     settings.load_from_dict(args)
 
-    vswitch_none = False
-    # set dpdk and ovs paths accorfing to VNF and VSWITCH
-    if settings.getValue('VSWITCH').endswith('Vanilla'):
-        # settings paths for Vanilla
-        settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_VANILLA')))
-    elif settings.getValue('VSWITCH').endswith('Vhost'):
-        if settings.getValue('VNF').endswith('Cuse'):
-            # settings paths for Cuse
-            settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_CUSE')))
-            settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_CUSE')))
-        else:
-            # settings paths for VhostUser
-            settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER')))
-            settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER')))
-    else:
-        # default - set to VHOST USER but can be changed during enhancement
-        settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER')))
-        settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER')))
-        if 'none' == settings.getValue('VSWITCH').strip().lower():
-            vswitch_none = True
+    settings.setValue('mode', args['mode'])
+
+    # update paths to trafficgens if required
+    if settings.getValue('mode') == 'trafficgen':
+        functions.settings_update_paths()
+
+    # if required, handle list-* operations
+    handle_list_options(args)
+
+    # generate results directory name
+    results_dir = "results_" + timestamp
+    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+    settings.setValue('RESULTS_PATH', results_path)
+
+    # create results directory
+    if not os.path.exists(results_path):
+        os.makedirs(results_path)
 
     configure_logging(settings.getValue('VERBOSITY'))
-    logger = logging.getLogger()
+
+    # CI build support
+    _LOGGER.info("Creating result directory: %s", results_path)
 
     # check and fix locale
     check_and_set_locale()
@@ -370,20 +725,20 @@ def main():
     if args['trafficgen']:
         trafficgens = Loader().get_trafficgens()
         if args['trafficgen'] not in trafficgens:
-            logging.error('There are no trafficgens matching \'%s\' found in'
+            _LOGGER.error('There are no trafficgens matching \'%s\' found in'
                           ' \'%s\'. Exiting...', args['trafficgen'],
                           settings.getValue('TRAFFICGEN_DIR'))
             sys.exit(1)
 
-    # configure vswitch
+    # configuration validity checks
     if args['vswitch']:
-        vswitch_none = 'none' == args['vswitch'].strip().lower()
+        vswitch_none = args['vswitch'].strip().lower() == 'none'
         if vswitch_none:
             settings.setValue('VSWITCH', 'none')
         else:
             vswitches = Loader().get_vswitches()
             if args['vswitch'] not in vswitches:
-                logging.error('There are no vswitches matching \'%s\' found in'
+                _LOGGER.error('There are no vswitches matching \'%s\' found in'
                               ' \'%s\'. Exiting...', args['vswitch'],
                               settings.getValue('VSWITCH_DIR'))
                 sys.exit(1)
@@ -392,7 +747,7 @@ def main():
         settings.setValue('PKTFWD', args['fwdapp'])
         fwdapps = Loader().get_pktfwds()
         if args['fwdapp'] not in fwdapps:
-            logging.error('There are no forwarding application'
+            _LOGGER.error('There is no forwarding application'
                           ' matching \'%s\' found in'
                           ' \'%s\'. Exiting...', args['fwdapp'],
                           settings.getValue('PKTFWD_DIR'))
@@ -401,132 +756,140 @@ def main():
     if args['vnf']:
         vnfs = Loader().get_vnfs()
         if args['vnf'] not in vnfs:
-            logging.error('there are no vnfs matching \'%s\' found in'
+            _LOGGER.error('there are no vnfs matching \'%s\' found in'
                           ' \'%s\'. exiting...', args['vnf'],
-                          settings.getValue('vnf_dir'))
+                          settings.getValue('VNF_DIR'))
             sys.exit(1)
 
-    if args['duration']:
-        if args['duration'].isdigit() and int(args['duration']) > 0:
-            settings.setValue('duration', args['duration'])
-        else:
-            logging.error('The selected Duration is not a number')
+    if args['loadgen']:
+        loadgens = Loader().get_loadgens()
+        if args['loadgen'] not in loadgens:
+            _LOGGER.error('There are no loadgens matching \'%s\' found in'
+                          ' \'%s\'. Exiting...', args['loadgen'],
+                          settings.getValue('LOADGEN_DIR'))
             sys.exit(1)
 
-    # update global settings
-    guest_loopback = get_test_param('guest_loopback', None)
-    if guest_loopback:
-        tmp_gl = []
-        for i in range(len(settings.getValue('GUEST_LOOPBACK'))):
-            tmp_gl.append(guest_loopback)
-        settings.setValue('GUEST_LOOPBACK', tmp_gl)
-
-    settings.setValue('mode', args['mode'])
+    if args['exact_test_name'] and args['tests']:
+        _LOGGER.error("Cannot specify tests with both positional args and --test.")
+        sys.exit(1)
+
+    # modify NIC configuration to decode enhanced PCI IDs
+    wl_nics_orig = list(networkcard.check_pci(pci) for pci in settings.getValue('WHITELIST_NICS'))
+    settings.setValue('WHITELIST_NICS_ORIG', wl_nics_orig)
+
+    # sriov handling is performed on checked/expanded PCI IDs
+    settings.setValue('SRIOV_ENABLED', enable_sriov(wl_nics_orig))
+
+    nic_list = []
+    for nic in wl_nics_orig:
+        tmp_nic = networkcard.get_nic_info(nic)
+        if tmp_nic:
+            nic_list.append({'pci' : tmp_nic,
+                             'type' : 'vf' if networkcard.get_sriov_pf(tmp_nic) else 'pf',
+                             'mac' : networkcard.get_mac(tmp_nic),
+                             'driver' : networkcard.get_driver(tmp_nic),
+                             'device' : networkcard.get_device_name(tmp_nic)})
+        else:
+            vsperf_finalize()
+            raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))
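+    # each nic_list entry now resembles, e.g., {'pci': '0000:05:00.0',
+    # 'type': 'pf', 'mac': '...', 'driver': 'ixgbe', 'device': 'eth2'}
+    # (illustrative values)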
 
-    # generate results directory name
-    date = datetime.datetime.fromtimestamp(time.time())
-    results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
-    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+    settings.setValue('NICS', nic_list)
+    # for backward compatibility
+    settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
 
-    # create results directory
-    if not os.path.exists(results_path):
-        logger.info("Creating result directory: "  + results_path)
-        os.makedirs(results_path)
 
+    # pylint: disable=too-many-nested-blocks
     if settings.getValue('mode') == 'trafficgen':
         # execute only traffic generator
-        logging.debug("Executing traffic generator:")
+        _LOGGER.debug("Executing traffic generator:")
         loader = Loader()
         # set traffic details, so they can be passed to traffic ctl
-        traffic = copy.deepcopy(TRAFFIC_DEFAULTS)
-        traffic.update({'traffic_type': get_test_param('traffic_type', 'rfc2544'),
-                        'bidir': get_test_param('bidirectional', False),
-                        'multistream': int(get_test_param('multistream', 0)),
-                        'stream_type': get_test_param('stream_type', 'L4'),
-                        'frame_rate': int(get_test_param('iload', 100))})
+        traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
+        traffic = functions.check_traffic(traffic)
 
         traffic_ctl = component_factory.create_traffic(
             traffic['traffic_type'],
             loader.get_trafficgen_class())
         with traffic_ctl:
             traffic_ctl.send_traffic(traffic)
-        logging.debug("Traffic Results:")
+        _LOGGER.debug("Traffic Results:")
         traffic_ctl.print_results()
+
+        # write results into CSV file
+        result_file = os.path.join(results_path, "result.csv")
+        PerformanceTestCase.write_result_to_file(traffic_ctl.get_results(), result_file)
     else:
         # configure tests
-        testcases = settings.getValue('PERFORMANCE_TESTS')
-        all_tests = []
-        for cfg in testcases:
-            try:
-                all_tests.append(TestCase(cfg, results_path))
-            except (Exception) as _:
-                logger.exception("Failed to create test: %s",
-                                 cfg.get('Name', '<Name not set>'))
-                raise
-
-        # if required, handle list-* operations
-
-        if args['list']:
-            print("Available Tests:")
-            print("======")
-            for test in all_tests:
-                print('* %-18s%s' % ('%s:' % test.name, test.desc))
-            exit()
-
-        if args['list_trafficgens']:
-            print(Loader().get_trafficgens_printable())
-            exit()
-
-        if args['list_collectors']:
-            print(Loader().get_collectors_printable())
-            exit()
-
-        if args['list_vswitches']:
-            print(Loader().get_vswitches_printable())
-            exit()
-
-        if args['list_vnfs']:
-            print(Loader().get_vnfs_printable())
-            exit()
-
-        if args['list_settings']:
-            print(str(settings))
-            exit()
-
-        # select requested tests
-        if args['exact_test_name'] and args['tests']:
-            logger.error("Cannot specify tests with both positional args and --test.")
-            sys.exit(1)
+        if args['integration']:
+            testcases = settings.getValue('INTEGRATION_TESTS')
+        else:
+            testcases = settings.getValue('PERFORMANCE_TESTS')
 
         if args['exact_test_name']:
             exact_names = args['exact_test_name']
             # positional args => exact matches only
-            selected_tests = [test for test in all_tests if test.name in exact_names]
+            selected_tests = []
+            for test_name in exact_names:
+                for test in testcases:
+                    if test['Name'] == test_name:
+                        selected_tests.append(test)
         elif args['tests']:
             # --tests => apply filter to select requested tests
-            selected_tests = apply_filter(all_tests, args['tests'])
+            selected_tests = apply_filter(testcases, args['tests'])
         else:
             # Default - run all tests
-            selected_tests = all_tests
+            selected_tests = testcases
 
         if not selected_tests:
-            logger.error("No tests matched --test option or positional args. Done.")
+            _LOGGER.error("No tests matched --tests option or positional args. Done.")
+            vsperf_finalize()
             sys.exit(1)
 
-        # run tests
         suite = unittest.TestSuite()
-        for test in selected_tests:
+        settings_snapshot = copy.deepcopy(settings.__dict__)
+
+        for i, cfg in enumerate(selected_tests):
+            settings.setValue('_TEST_INDEX', i)
+            test_name = cfg.get('Name', '<Name not set>')
             try:
+                test_params = settings.getValue('_PARAMS_LIST')
+                if isinstance(test_params, list):
+                    list_index = i
+                    if i >= len(test_params):
+                        list_index = len(test_params) - 1
+                    test_params = test_params[list_index]
+                if settings.getValue('CUMULATIVE_PARAMS'):
+                    test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+                settings.setValue('TEST_PARAMS', test_params)
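+                # e.g. (illustrative) --test-params "['x=1','x=2']" with three
+                # selected tests: tests 0 and 1 take their own entry and test 2
+                # reuses the last one, as the list index is clamped above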
+
+                if args['integration']:
+                    test = IntegrationTestCase(cfg)
+                else:
+                    test = PerformanceTestCase(cfg)
+
                 test.run()
                 suite.addTest(MockTestCase('', True, test.name))
-            #pylint: disable=broad-except
+
+            # pylint: disable=broad-except
             except (Exception) as ex:
-                logger.exception("Failed to run test: %s", test.name)
-                suite.addTest(MockTestCase(str(ex), False, test.name))
-                logger.info("Continuing with next test...")
+                _LOGGER.exception("Failed to run test: %s", test_name)
+                suite.addTest(MockTestCase(str(ex), False, test_name))
+                _LOGGER.info("Continuing with next test...")
+            finally:
+                if not settings.getValue('CUMULATIVE_PARAMS'):
+                    settings.restore_from_dict(settings_snapshot)
+
+        settings.restore_from_dict(settings_snapshot)
+
+
+        # Generate and print out the Performance Matrix
+        if args['matrix']:
+            generate_performance_matrix(selected_tests, results_path)
 
         # generate final rst report with results of all executed TCs
-        generate_final_report(results_path)
+        generate_final_report()
+
+
 
         if settings.getValue('XUNIT'):
             xmlrunner.XMLTestRunner(
@@ -535,29 +898,24 @@ def main():
 
         if args['opnfvpod']:
             pod_name = args['opnfvpod']
-            installer_name = settings.getValue('OPNFV_INSTALLER')
+            installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
             opnfv_url = settings.getValue('OPNFV_URL')
             pkg_list = settings.getValue('PACKAGE_LIST')
 
-            int_data = {'cuse': False,
-                        'vanilla': False,
-                        'pod': pod_name,
+            int_data = {'pod': pod_name,
+                        'build_tag': get_build_tag(),
                         'installer': installer_name,
                         'pkg_list': pkg_list,
-                        'db_url': opnfv_url}
-            if settings.getValue('VSWITCH').endswith('Vanilla'):
-                int_data['vanilla'] = True
-            if settings.getValue('VNF').endswith('Cuse'):
-                int_data['cuse'] = True
-            opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
-
-    #remove directory if no result files were created.
-    if os.path.exists(results_path):
-        files_list = os.listdir(results_path)
-        if files_list == []:
-            shutil.rmtree(results_path)
+                        'db_url': opnfv_url,
+                        # pass the vswitch name from configuration to be used for
+                        # failed TCs; for successful TCs it is safer to use the
+                        # vswitch name from the CSV, as a TC can override the
+                        # global configuration
+                        'vswitch': str(settings.getValue('VSWITCH')).lower()}
+            tc_names = [tc['Name'] for tc in selected_tests]
+            opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
+
+    # cleanup before exit
+    vsperf_finalize()
 
 if __name__ == "__main__":
     main()
-
-