import argparse
import re
import time
+import csv
import datetime
import shutil
import unittest
import subprocess
import ast
import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
-
sys.dont_write_bytecode = True
VERBOSITY_LEVELS = {
'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
}
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+ "The following performance matrix was generated with the results of all the\n"\
+ "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
_LOGGER = logging.getLogger()
def parse_param_string(values):
    """
    Parse and split a single '--test-params' argument.

    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
    values. For multiple overrides use a ; separated list for
    e.g. --test-params 'x=z; y=(a,b)'

    :param values: raw CLI string with ';'-separated overrides
    :return: dict mapping parameter name to its parsed value
             (True for bare 'x' entries without '=value')
    """
    results = {}

    if values == '':
        return {}

    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
        param = param.strip()
        value = value.strip()
        if param:
            if value:
                # values are passed inside string from CLI, so we must retype them accordingly
                try:
                    results[param] = ast.literal_eval(value)
                except (ValueError, SyntaxError):
                    # for backward compatibility, we have to accept strings without quotes;
                    # literal_eval raises ValueError for malformed nodes (e.g. bare names)
                    # and SyntaxError for unparsable input (e.g. values with spaces)
                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
                                    param, str(value))
                    results[param] = str(value)
            else:
                results[param] = True
    return results
+
+
def parse_arguments():
"""
Parse command line arguments.
"""
class _SplitTestParamsAction(argparse.Action):
"""
- Parse and split the '--test-params' argument.
+ Parse and split '--test-params' arguments.
- This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
- values. For multiple overrides use a ; separated list for
+ This expects either a single list of ; separated overrides
+ as 'x=y', 'x=y,z' or 'x' (implicit true) values.
e.g. --test-params 'x=z; y=(a,b)'
+ Or a list of these ; separated lists with overrides for
+ multiple tests.
+ e.g. --test-params "['x=z; y=(a,b)','x=z']"
"""
def __call__(self, parser, namespace, values, option_string=None):
- results = {}
-
- for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
- param = param.strip()
- value = value.strip()
- if len(param):
- if len(value):
- # values are passed inside string from CLI, so we must retype them accordingly
- try:
- results[param] = ast.literal_eval(value)
- except ValueError:
- # for backward compatibility, we have to accept strings without quotes
- _LOGGER.warning("Adding missing quotes around string value: %s = %s",
- param, str(value))
- results[param] = str(value)
- else:
- results[param] = True
-
+ if values[0] == '[':
+ input_list = ast.literal_eval(values)
+ parameter_list = []
+ for test_params in input_list:
+ parameter_list.append(parse_param_string(test_params))
+ else:
+ parameter_list = parse_param_string(values)
+ results = {'_PARAMS_LIST':parameter_list}
setattr(namespace, self.dest, results)
class _ValidateFileAction(argparse.Action):
def list_logging_levels():
"""Give a summary of all available logging levels.
- :return: List of verbosity level names in decreasing order of
+ :return: List of verbosity level names in decreasing order of
verbosity
"""
return sorted(VERBOSITY_LEVELS.keys(),
help='list all system forwarding applications and exit')
parser.add_argument('--list-vnfs', action='store_true',
help='list all system vnfs and exit')
+ parser.add_argument('--list-loadgens', action='store_true',
+ help='list all background load generators')
parser.add_argument('--list-settings', action='store_true',
help='list effective settings configuration and exit')
parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
group.add_argument('--vswitch', help='vswitch implementation to use')
group.add_argument('--fwdapp', help='packet forwarding application to use')
group.add_argument('--vnf', help='vnf to use')
+ group.add_argument('--loadgen', help='loadgen to use')
group.add_argument('--sysmetrics', help='system metrics logger to use')
group = parser.add_argument_group('test behavior options')
group.add_argument('--xunit', action='store_true',
help='settings file')
group.add_argument('--test-params', action=_SplitTestParamsAction,
help='csv list of test parameters: key=val; e.g. '
- 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
- 'GUEST_LOOPBACK=["l2fwd"] ...')
+ 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+ 'GUEST_LOOPBACK=["l2fwd"] ...'
+ ' or a list of csv lists of test parameters: key=val; e.g. '
+ '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+ '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
group.add_argument('--opnfvpod', help='name of POD in opnfv')
+ group.add_argument('--matrix', help='enable performance matrix analysis',
+ action='store_true', default=False)
args = vars(parser.parse_args())
def configure_logging(level):
"""Configure logging.
"""
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
+ rename_default = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_default = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT'))
+ settings.getValue('LOG_DIR'), rename_default)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+ rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_host_cmds = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+ settings.getValue('LOG_DIR'), rename_hostcmd)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ rename_traffic = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_traffic_gen = os.path.join(
- settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ settings.getValue('LOG_DIR'), rename_traffic)
+ metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
+ settings.getValue('LOG_TIMESTAMP') + '.log')
+ log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
+ metrics_file)
_LOGGER.setLevel(logging.DEBUG)
file_logger = logging.FileHandler(filename=log_file_default)
file_logger.setLevel(logging.DEBUG)
+ file_logger.setFormatter(logging.Formatter(
+ '%(asctime)s : %(message)s'))
_LOGGER.addHandler(file_logger)
class CommandFilter(logging.Filter):
def filter(self, record):
return record.getMessage().startswith(trafficgen.CMD_PREFIX)
+ class CollectdMetricsFilter(logging.Filter):
+ """Filter out strings beginning with 'COLLECTD' :'"""
+ def filter(self, record):
+ return record.getMessage().startswith('COLLECTD')
+
cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
cmd_logger.setLevel(logging.DEBUG)
cmd_logger.addFilter(CommandFilter())
gen_logger.addFilter(TrafficGenCommandFilter())
_LOGGER.addHandler(gen_logger)
+ if settings.getValue('COLLECTOR') == 'Collectd':
+ met_logger = logging.FileHandler(filename=log_file_infra_metrics)
+ met_logger.setLevel(logging.DEBUG)
+ met_logger.addFilter(CollectdMetricsFilter())
+ _LOGGER.addHandler(met_logger)
+
def apply_filter(tests, tc_filter):
"""Allow a subset of tests to be conveniently selected
""" Function will return a list of vSwitches detected in given ``rst_files``.
"""
vswitch_names = set()
- if len(rst_files):
+ if rst_files:
try:
output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
for line in output:
if match:
vswitch_names.add(match.group(1))
- if len(vswitch_names):
+ if vswitch_names:
return list(vswitch_names)
except subprocess.CalledProcessError:
# fallback to the default value
return ['vSwitch']
def get_build_tag():
    """ Function will return a Jenkins job ID environment variable.
    """
    # BUILD_TAG is set by Jenkins; fall back to "none" outside CI
    build_tag = os.environ.get('BUILD_TAG')
    if build_tag is None:
        _LOGGER.warning('Cannot detect Jenkins job ID')
        build_tag = "none"

    return build_tag
def generate_final_report():
""" Function will check if partial test results are available
# check if there are any results in rst format
rst_results = glob.glob(os.path.join(path, 'result*rst'))
pkt_processors = get_vswitch_names(rst_results)
- if len(rst_results):
+ if rst_results:
try:
test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
# create report caption directly - it is not worth to execute jinja machinery
if retval == 0 and os.path.isfile(test_report):
_LOGGER.info('Overall test report written to "%s"', test_report)
else:
- _LOGGER.error('Generatrion of overall test report has failed.')
+ _LOGGER.error('Generation of overall test report has failed.')
# remove temporary file
os.remove(_TEMPLATE_RST['tmp'])
_LOGGER.error('Generatrion of overall test report has failed.')
def generate_performance_matrix(selected_tests, results_path):
    """
    Loads the results of all the currently run tests, compares them
    based on the MATRIX_METRIC, outputs and saves the generated table.
    :selected_tests: list of currently run test
    :results_path: directory path to the results of current tests
    """
    _LOGGER.info('Performance Matrix:')
    test_list = []

    # collect name/deployment for every test; 'csv_data' stays False until
    # the matching result CSV is successfully loaded below
    for test in selected_tests:
        test_name = test.get('Name', '<Name not set>')
        test_deployment = test.get('Deployment', '<Deployment not set>')
        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})

    test_params = {}
    output = []
    all_params = settings.getValue('_PARAMS_LIST')
    for i in range(len(selected_tests)):
        test = test_list[i]
        if isinstance(all_params, list):
            # when fewer parameter sets than tests were given, reuse the last one
            list_index = i
            if i >= len(all_params):
                list_index = len(all_params) - 1
            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
                # cumulative mode: later override sets are merged on top of earlier ones
                test_params.update(all_params[list_index])
            else:
                test_params = all_params[list_index]
        else:
            test_params = all_params
        settings.setValue('TEST_PARAMS', test_params)
        # deepcopy so later update()/setValue calls don't mutate the recorded params
        test['test_params'] = copy.deepcopy(test_params)
        try:
            # result CSVs are written as result_<index>_<name>_<deployment>.csv
            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
                test['test_name'], test['test_deployment'])) as csvfile:
                reader = list(csv.DictReader(csvfile))
                # only the first CSV row is used for the comparison
                test['csv_data'] = reader[0]
        # pylint: disable=broad-except
        except (Exception) as ex:
            _LOGGER.error("Result file not found: %s", ex)

    metric = settings.getValue('MATRIX_METRIC')
    change = {}
    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
    # first test is the baseline; a missing or zero metric would break the
    # percentage computation below, so bail out early
    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
        _LOGGER.error("Incorrect format of test results")
        return
    for i, test in enumerate(test_list):
        if test['csv_data']:
            # percentage change relative to the baseline (test 0)
            change[i] = float(test['csv_data'][metric])/\
                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
            output.append([i, test['test_name'], float(test['csv_data'][metric]),
                           change[i], str(test['test_params'])[1:-1]])
        else:
            change[i] = 0
            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
    # print the matrix to stdout and save it as rst alongside the other results
    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
                                                                   tablefmt="rst", floatfmt="0.3f")))
    _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
def enable_sriov(nic_list):
""" Enable SRIOV for given enhanced PCI IDs
sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
# sriov is required for some NICs
- if len(sriov_nic):
+ if sriov_nic:
for nic in sriov_nic:
# check if SRIOV is supported and enough virt interfaces are available
if not networkcard.is_sriov_supported(nic) \
else:
_LOGGER.debug("SRIOV enabled for NIC %s", nic)
+ # ensure that path to the bind tool is valid
+ functions.settings_update_paths()
+
# WORKAROUND: it has been observed with IXGBE(VF) driver,
# that NIC doesn't correctly dispatch traffic to VFs based
# on their MAC address. Unbind and bind to the same driver
print(Loader().get_pktfwds_printable())
sys.exit(0)
+ if args['list_loadgens']:
+ print(Loader().get_loadgens_printable())
+ sys.exit(0)
+
if args['list_settings']:
print(str(settings))
sys.exit(0)
if args['list']:
- # configure tests
- if args['integration']:
- testcases = settings.getValue('INTEGRATION_TESTS')
- else:
- testcases = settings.getValue('PERFORMANCE_TESTS')
+ list_testcases(args)
+ sys.exit(0)
- print("Available Tests:")
- print("================")
- for test in testcases:
- print('* %-30s %s' % ('%s:' % test['Name'], test['Description']))
- sys.exit(0)
def list_testcases(args):
    """ Print list of testcases requested by --list CLI argument

    :param args: A dictionary with all CLI arguments
    """
    # configure tests: integration suite or the default performance suite
    suite_key = 'INTEGRATION_TESTS' if args['integration'] else 'PERFORMANCE_TESTS'
    testcases = settings.getValue(suite_key)

    print("Available Tests:")
    print("================")

    for test in testcases:
        description = functions.format_description(test['Description'], 70)
        title = '{}:'.format(test['Name'])
        if len(test['Name']) < 40:
            # short name: first description line shares the row with the name
            print('* {:40} {}'.format(title, description[0]))
            remainder = description[1:]
        else:
            # long name gets its own row; all description lines go below
            print('* {}'.format(title))
            remainder = description
        for desc_line in remainder:
            print('  {:40} {}'.format('', desc_line))
def vsperf_finalize():
if os.path.exists(results_path):
files_list = os.listdir(results_path)
if files_list == []:
- _LOGGER.info("Removing empty result directory: " + results_path)
+ _LOGGER.info("Removing empty result directory: %s", results_path)
shutil.rmtree(results_path)
except AttributeError:
# skip it if parameter doesn't exist
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
+ # Define the timestamp to be used by logs and results
+ date = datetime.datetime.fromtimestamp(time.time())
+ timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+ settings.setValue('LOG_TIMESTAMP', timestamp)
+
# Load non performance/integration tests
if args['integration']:
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
settings.setValue('mode', args['mode'])
- # set dpdk and ovs paths according to VNF and VSWITCH
- if settings.getValue('mode') != 'trafficgen':
+ # update paths to trafficgens if required
+ if settings.getValue('mode') == 'trafficgen':
functions.settings_update_paths()
# if required, handle list-* operations
settings.getValue('VNF_DIR'))
sys.exit(1)
+ if args['loadgen']:
+ loadgens = Loader().get_loadgens()
+ if args['loadgen'] not in loadgens:
+ _LOGGER.error('There are no loadgens matching \'%s\' found in'
+ ' \'%s\'. Exiting...', args['loadgen'],
+ settings.getValue('LOADGEN_DIR'))
+ sys.exit(1)
+
if args['exact_test_name'] and args['tests']:
_LOGGER.error("Cannot specify tests with both positional args and --test.")
sys.exit(1)
settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
# generate results directory name
- date = datetime.datetime.fromtimestamp(time.time())
- results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
+ # date = datetime.datetime.fromtimestamp(time.time())
+ results_dir = "results_" + timestamp
results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
settings.setValue('RESULTS_PATH', results_path)
# create results directory
if not os.path.exists(results_path):
- _LOGGER.info("Creating result directory: " + results_path)
+ _LOGGER.info("Creating result directory: %s", results_path)
os.makedirs(results_path)
-
+ # pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
# execute only traffic generator
_LOGGER.debug("Executing traffic generator:")
loader = Loader()
# set traffic details, so they can be passed to traffic ctl
traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
+ traffic = functions.check_traffic(traffic)
traffic_ctl = component_factory.create_traffic(
traffic['traffic_type'],
if args['exact_test_name']:
exact_names = args['exact_test_name']
# positional args => exact matches only
- selected_tests = [test for test in testcases if test['Name'] in exact_names]
+ selected_tests = []
+ for test_name in exact_names:
+ for test in testcases:
+ if test['Name'] == test_name:
+ selected_tests.append(test)
elif args['tests']:
# --tests => apply filter to select requested tests
selected_tests = apply_filter(testcases, args['tests'])
# Default - run all tests
selected_tests = testcases
- if not len(selected_tests):
+ if not selected_tests:
_LOGGER.error("No tests matched --tests option or positional args. Done.")
vsperf_finalize()
sys.exit(1)
- # run tests
- # Add pylint exception: Redefinition of test type from
- # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
- # pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
- for cfg in selected_tests:
+ settings_snapshot = copy.deepcopy(settings.__dict__)
+
+ for i, cfg in enumerate(selected_tests):
+ settings.setValue('_TEST_INDEX', i)
test_name = cfg.get('Name', '<Name not set>')
try:
+ test_params = settings.getValue('_PARAMS_LIST')
+ if isinstance(test_params, list):
+ list_index = i
+ if i >= len(test_params):
+ list_index = len(test_params) - 1
+ test_params = test_params[list_index]
+ if settings.getValue('CUMULATIVE_PARAMS'):
+ test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+ settings.setValue('TEST_PARAMS', test_params)
+
if args['integration']:
test = IntegrationTestCase(cfg)
else:
test = PerformanceTestCase(cfg)
+
test.run()
suite.addTest(MockTestCase('', True, test.name))
+
# pylint: disable=broad-except
except (Exception) as ex:
_LOGGER.exception("Failed to run test: %s", test_name)
suite.addTest(MockTestCase(str(ex), False, test_name))
_LOGGER.info("Continuing with next test...")
+ finally:
+ if not settings.getValue('CUMULATIVE_PARAMS'):
+ settings.restore_from_dict(settings_snapshot)
+
+ settings.restore_from_dict(settings_snapshot)
+
+
+ # Generate and printout Performance Matrix
+ if args['matrix']:
+ generate_performance_matrix(selected_tests, results_path)
# generate final rst report with results of all executed TCs
generate_final_report()
+
+
if settings.getValue('XUNIT'):
xmlrunner.XMLTestRunner(
output=settings.getValue('XUNIT_DIR'), outsuffix="",
if args['opnfvpod']:
pod_name = args['opnfvpod']
- installer_name = settings.getValue('OPNFV_INSTALLER')
+ installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
opnfv_url = settings.getValue('OPNFV_URL')
pkg_list = settings.getValue('PACKAGE_LIST')
- int_data = {'vanilla': False,
- 'pod': pod_name,
+ int_data = {'pod': pod_name,
+ 'build_tag': get_build_tag(),
'installer': installer_name,
'pkg_list': pkg_list,
- 'db_url': opnfv_url}
- if settings.getValue('VSWITCH').endswith('Vanilla'):
- int_data['vanilla'] = True
- opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
+ 'db_url': opnfv_url,
+ # pass vswitch name from configuration to be used for failed
+ # TCs; In case of successful TCs it is safer to use vswitch
+ # name from CSV as TC can override global configuration
+ 'vswitch': str(settings.getValue('VSWITCH')).lower()}
+ tc_names = [tc['Name'] for tc in selected_tests]
+ opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
# cleanup before exit
vsperf_finalize()