X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=vsperf;h=f4104bcf3f00124c573ef0e4283b51ff9ee7b02d;hb=9145d9908a62aa05d2120569ce3bf9e296ebb07c;hp=44e4542574ecea20e73797aa9fce573ba87a043d;hpb=718e9a52e17181d3823b75358d09fcc97734ba4d;p=vswitchperf.git

diff --git a/vsperf b/vsperf
index 44e45425..f4104bcf 100755
--- a/vsperf
+++ b/vsperf
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,19 +23,20 @@ import sys
 import argparse
 import re
 import time
+import csv
 import datetime
 import shutil
 import unittest
-import xmlrunner
 import locale
 import copy
 import glob
 import subprocess
-
-sys.dont_write_bytecode = True
-
+import ast
+import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
 from conf import settings
-from conf import get_test_param
+import core.component_factory as component_factory
 from core.loader import Loader
 from testcases import PerformanceTestCase
 from testcases import IntegrationTestCase
@@ -44,8 +45,7 @@ from tools import networkcard
 from tools import functions
 from tools.pkt_gen import trafficgen
 from tools.opnfvdashboard import opnfvdashboard
-from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
-import core.component_factory as component_factory
+sys.dont_write_bytecode = True
 
 VERBOSITY_LEVELS = {
     'debug': logging.DEBUG,
@@ -63,33 +63,67 @@ _TEMPLATE_RST = {'head' : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
                  'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
                 }
 
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+                   "The following performance matrix was generated with the results of all the\n"\
+                   "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
+
 _LOGGER = logging.getLogger()
 
+def parse_param_string(values):
+    """
+    Parse and split a single '--test-params' argument.
+
+    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ; separated list for
+    e.g. --test-params 'x=z; y=(a,b)'
+    """
+    results = {}
+
+    if values == '':
+        return {}
+
+    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+        param = param.strip()
+        value = value.strip()
+        if param:
+            if value:
+                # values are passed as strings from the CLI, so we must retype them accordingly
+                try:
+                    results[param] = ast.literal_eval(value)
+                except ValueError:
+                    # for backward compatibility, we have to accept strings without quotes
+                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+                                    param, str(value))
+                    results[param] = str(value)
+            else:
+                results[param] = True
+    return results
+
+
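A quick sanity check of the parsing rules above (the calls assume this file's parse_param_string; the parameter names are illustrative vsperf settings):

    parse_param_string("TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30")
    # -> {'TRAFFICGEN_PKT_SIZES': (64, 128), 'TRAFFICGEN_DURATION': 30}

    parse_param_string("GUEST_LOOPBACK=l2fwd")
    # -> {'GUEST_LOOPBACK': 'l2fwd'}  (unquoted string: a warning is logged, value kept as str)

    parse_param_string("VSWITCH_JUMBO_FRAMES_ENABLED")
    # -> {'VSWITCH_JUMBO_FRAMES_ENABLED': True}  (bare key means implicit True)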
--test-params "['x=z; y=(a,b)','x=z']" """ def __call__(self, parser, namespace, values, option_string=None): - results = {} - - for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values): - param = param.strip() - value = value.strip() - if len(param): - if len(value): - results[param] = value - else: - results[param] = True - + if values[0] == '[': + input_list = ast.literal_eval(values) + parameter_list = [] + for test_params in input_list: + parameter_list.append(parse_param_string(test_params)) + else: + parameter_list = parse_param_string(values) + results = {'_PARAMS_LIST':parameter_list} setattr(namespace, self.dest, results) class _ValidateFileAction(argparse.Action): @@ -121,7 +155,7 @@ def parse_arguments(): def list_logging_levels(): """Give a summary of all available logging levels. - :return: List of verbosity level names in decreasing order of + :return: List of verbosity level names in decreasing order of verbosity """ return sorted(VERBOSITY_LEVELS.keys(), @@ -142,6 +176,8 @@ def parse_arguments(): help='list all system forwarding applications and exit') parser.add_argument('--list-vnfs', action='store_true', help='list all system vnfs and exit') + parser.add_argument('--list-loadgens', action='store_true', + help='list all background load generators') parser.add_argument('--list-settings', action='store_true', help='list effective settings configuration and exit') parser.add_argument('exact_test_name', nargs='*', help='Exact names of\ @@ -169,6 +205,7 @@ def parse_arguments(): group.add_argument('--vswitch', help='vswitch implementation to use') group.add_argument('--fwdapp', help='packet forwarding application to use') group.add_argument('--vnf', help='vnf to use') + group.add_argument('--loadgen', help='loadgen to use') group.add_argument('--sysmetrics', help='system metrics logger to use') group = parser.add_argument_group('test behavior options') group.add_argument('--xunit', action='store_true', @@ -181,9 +218,14 @@ def parse_arguments(): help='settings file') group.add_argument('--test-params', action=_SplitTestParamsAction, help='csv list of test parameters: key=val; e.g. ' - 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; ' - 'GUEST_LOOPBACK=["l2fwd"] ...') + 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; ' + 'GUEST_LOOPBACK=["l2fwd"] ...' + ' or a list of csv lists of test parameters: key=val; e.g. ' + '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\',' + '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']') group.add_argument('--opnfvpod', help='name of POD in opnfv') + group.add_argument('--matrix', help='enable performance matrix analysis', + action='store_true', default=False) args = vars(parser.parse_args()) @@ -193,13 +235,31 @@ def parse_arguments(): def configure_logging(level): """Configure logging. 
""" + name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT')) + rename_default = "{name}_{uid}{ex}".format(name=name, + uid=settings.getValue( + 'LOG_TIMESTAMP'), + ex=ext) log_file_default = os.path.join( - settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT')) + settings.getValue('RESULTS_PATH'), rename_default) + name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS')) + rename_hostcmd = "{name}_{uid}{ex}".format(name=name, + uid=settings.getValue( + 'LOG_TIMESTAMP'), + ex=ext) log_file_host_cmds = os.path.join( - settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS')) + settings.getValue('RESULTS_PATH'), rename_hostcmd) + name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN')) + rename_traffic = "{name}_{uid}{ex}".format(name=name, + uid=settings.getValue( + 'LOG_TIMESTAMP'), + ex=ext) log_file_traffic_gen = os.path.join( - settings.getValue('LOG_DIR'), - settings.getValue('LOG_FILE_TRAFFIC_GEN')) + settings.getValue('RESULTS_PATH'), rename_traffic) + metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') + + settings.getValue('LOG_TIMESTAMP') + '.log') + log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'), + metrics_file) _LOGGER.setLevel(logging.DEBUG) @@ -211,6 +271,8 @@ def configure_logging(level): file_logger = logging.FileHandler(filename=log_file_default) file_logger.setLevel(logging.DEBUG) + file_logger.setFormatter(logging.Formatter( + '%(asctime)s : %(message)s')) _LOGGER.addHandler(file_logger) class CommandFilter(logging.Filter): @@ -223,6 +285,11 @@ def configure_logging(level): def filter(self, record): return record.getMessage().startswith(trafficgen.CMD_PREFIX) + class CollectdMetricsFilter(logging.Filter): + """Filter out strings beginning with 'COLLECTD' :'""" + def filter(self, record): + return record.getMessage().startswith('COLLECTD') + cmd_logger = logging.FileHandler(filename=log_file_host_cmds) cmd_logger.setLevel(logging.DEBUG) cmd_logger.addFilter(CommandFilter()) @@ -233,6 +300,12 @@ def configure_logging(level): gen_logger.addFilter(TrafficGenCommandFilter()) _LOGGER.addHandler(gen_logger) + if settings.getValue('COLLECTOR') == 'Collectd': + met_logger = logging.FileHandler(filename=log_file_infra_metrics) + met_logger.setLevel(logging.DEBUG) + met_logger.addFilter(CollectdMetricsFilter()) + _LOGGER.addHandler(met_logger) + def apply_filter(tests, tc_filter): """Allow a subset of tests to be conveniently selected @@ -280,6 +353,39 @@ def check_and_set_locale(): _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s", system_locale, locale.getdefaultlocale()) +def get_vswitch_names(rst_files): + """ Function will return a list of vSwitches detected in given ``rst_files``. + """ + vswitch_names = set() + if rst_files: + try: + output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines() + for line in output: + match = re.search(r'^\* vSwitch: ([^,]+)', str(line)) + if match: + vswitch_names.add(match.group(1)) + + if vswitch_names: + return list(vswitch_names) + + except subprocess.CalledProcessError: + _LOGGER.warning('Cannot detect vSwitches used during testing.') + + # fallback to the default value + return ['vSwitch'] + +def get_build_tag(): + """ Function will return a Jenkins job ID environment variable. 
 def apply_filter(tests, tc_filter):
     """Allow a subset of tests to be conveniently selected
@@ -280,6 +353,39 @@ def check_and_set_locale():
         _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                         system_locale, locale.getdefaultlocale())
 
+
+def get_vswitch_names(rst_files):
+    """ Function will return a list of vSwitches detected in given ``rst_files``.
+    """
+    vswitch_names = set()
+    if rst_files:
+        try:
+            output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
+            for line in output:
+                match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
+                if match:
+                    vswitch_names.add(match.group(1))
+
+            if vswitch_names:
+                return list(vswitch_names)
+
+        except subprocess.CalledProcessError:
+            _LOGGER.warning('Cannot detect vSwitches used during testing.')
+
+    # fallback to the default value
+    return ['vSwitch']
+
+
+def get_build_tag():
+    """ Function will return a Jenkins job ID environment variable.
+    """
+
+    try:
+        build_tag = os.environ['BUILD_TAG']
+
+    except KeyError:
+        _LOGGER.warning('Cannot detect Jenkins job ID')
+        build_tag = "none"
+
+    return build_tag
 
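Detection assumes each partial rst report carries a bullet of the form '* vSwitch: <name>, <details>' (my reading of the grep pattern and regex; the exact wording comes from the report templates). A small illustration of the regex:

    import re
    line = '* vSwitch: OvsDpdkVhost, Version: 2.7.0'   # hypothetical report line
    match = re.search(r'^\* vSwitch: ([^,]+)', line)
    match.group(1)                                     # -> 'OvsDpdkVhost'

Everything before the first comma counts as the name, duplicates collapse in the set, and ['vSwitch'] remains the fallback when nothing can be detected.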
 def generate_final_report():
     """ Function will check if partial test results are available
@@ -289,18 +395,15 @@ def generate_final_report():
     path = settings.getValue('RESULTS_PATH')
     # check if there are any results in rst format
     rst_results = glob.glob(os.path.join(path, 'result*rst'))
-    if len(rst_results):
+    pkt_processors = get_vswitch_names(rst_results)
+    if rst_results:
         try:
-            test_report = os.path.join(path, '{}_{}'.format(settings.getValue('VSWITCH'), _TEMPLATE_RST['final']))
+            test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
             # create report caption directly - it is not worth to execute jinja machinery
-            if settings.getValue('VSWITCH').lower() != 'none':
-                pkt_processor = Loader().get_vswitches()[settings.getValue('VSWITCH')].__doc__.strip().split('\n')[0]
-            else:
-                pkt_processor = Loader().get_pktfwds()[settings.getValue('PKTFWD')].__doc__.strip().split('\n')[0]
             report_caption = '{}\n{} {}\n{}\n\n'.format(
                 '============================================================',
                 'Performance report for',
-                pkt_processor,
+                ', '.join(pkt_processors),
                 '============================================================')
 
             with open(_TEMPLATE_RST['tmp'], 'w') as file_:
@@ -312,7 +415,7 @@ def generate_final_report():
             if retval == 0 and os.path.isfile(test_report):
                 _LOGGER.info('Overall test report written to "%s"', test_report)
             else:
-                _LOGGER.error('Generatrion of overall test report has failed.')
+                _LOGGER.error('Generation of overall test report has failed.')
 
             # remove temporary file
             os.remove(_TEMPLATE_RST['tmp'])
@@ -321,6 +424,69 @@ def generate_final_report():
             _LOGGER.error('Generatrion of overall test report has failed.')
 
 
+def generate_performance_matrix(selected_tests, results_path):
+    """
+    Loads the results of all the currently run tests, compares them
+    based on the MATRIX_METRIC, outputs and saves the generated table.
+    :selected_tests: list of currently run tests
+    :results_path: directory path to the results of current tests
+    """
+    _LOGGER.info('Performance Matrix:')
+    test_list = []
+
+    for test in selected_tests:
+        test_name = test.get('Name', '')
+        test_deployment = test.get('Deployment', '')
+        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+    test_params = {}
+    output = []
+    all_params = settings.getValue('_PARAMS_LIST')
+    for i in range(len(selected_tests)):
+        test = test_list[i]
+        if isinstance(all_params, list):
+            list_index = i
+            if i >= len(all_params):
+                list_index = len(all_params) - 1
+            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+                test_params.update(all_params[list_index])
+            else:
+                test_params = all_params[list_index]
+        else:
+            test_params = all_params
+        settings.setValue('TEST_PARAMS', test_params)
+        test['test_params'] = copy.deepcopy(test_params)
+        try:
+            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+                                                      test['test_name'], test['test_deployment'])) as csvfile:
+                reader = list(csv.DictReader(csvfile))
+                test['csv_data'] = reader[0]
+        # pylint: disable=broad-except
+        except (Exception) as ex:
+            _LOGGER.error("Result file not found: %s", ex)
+
+    metric = settings.getValue('MATRIX_METRIC')
+    change = {}
+    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+        _LOGGER.error("Incorrect format of test results")
+        return
+    for i, test in enumerate(test_list):
+        if test['csv_data']:
+            change[i] = float(test['csv_data'][metric])/\
+                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
+            output.append([i, test['test_name'], float(test['csv_data'][metric]),
+                           change[i], str(test['test_params'])[1:-1]])
+        else:
+            change[i] = 0
+            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+                                                                   tablefmt="rst", floatfmt="0.3f")))
+    _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
+
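The 'Change [%]' column is always relative to the first test in the run: change_i = value_i / (value_0 / 100) - 100. A worked check of the arithmetic (numbers invented for illustration; MATRIX_METRIC selects which CSV column is compared):

    value_0 = 5.0e6                      # baseline metric, test 0
    value_1 = 5.5e6
    value_1 / (value_0 / 100) - 100      # -> 10.0, i.e. 10 % above baseline
    value_0 / (value_0 / 100) - 100      # -> 0.0 for the baseline itself

This is also why the function returns early when test 0 has no CSV data or a zero metric: every other row would otherwise divide by zero.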
 def enable_sriov(nic_list):
     """ Enable SRIOV for given enhanced PCI IDs
@@ -338,18 +504,20 @@ def enable_sriov(nic_list):
             sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
 
     # sriov is required for some NICs
-    if len(sriov_nic):
+    if sriov_nic:
         for nic in sriov_nic:
             # check if SRIOV is supported and enough virt interfaces are available
             if not networkcard.is_sriov_supported(nic) \
                or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
                 # if not, enable and set appropriate number of VFs
                 if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
-                    _LOGGER.error("SRIOV cannot be enabled for NIC %s", nic)
-                    raise
+                    raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
                 else:
                     _LOGGER.debug("SRIOV enabled for NIC %s", nic)
 
+    # ensure that path to the bind tool is valid
+    functions.settings_update_paths()
+
     # WORKAROUND: it has been observed with IXGBE(VF) driver,
     # that NIC doesn't correclty dispatch traffic to VFs based
     # on their MAC address. Unbind and bind to the same driver
@@ -375,8 +543,7 @@ def disable_sriov(nic_list):
     for nic in nic_list:
         if networkcard.is_sriov_nic(nic):
             if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
-                _LOGGER.error("SRIOV cannot be disabled for NIC %s", nic)
-                raise
+                raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
             else:
                 _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])
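Both helpers receive 'enhanced' PCI IDs of the form '<pci_address>|vf<N>', and tmp_nic[1][2:] above is what strips the 'vf' prefix. A hedged sketch of the parsing together with the standard Linux sysfs knob that a set_sriov_numvfs-style helper typically wraps (the real networkcard implementation may differ):

    nic = '0000:05:00.0|vf1'          # illustrative enhanced PCI ID
    pci, vf = nic.split('|')          # ('0000:05:00.0', 'vf1')
    numvfs = int(vf[2:]) + 1          # vf1 needs at least two VFs to exist

    # conventional sysfs interface for SR-IOV capable ports
    with open('/sys/bus/pci/devices/{}/sriov_numvfs'.format(pci), 'w') as f_:
        f_.write(str(numvfs))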
""" @@ -486,7 +672,22 @@ def main(): settings.load_from_dir(os.path.join(_CURR_DIR, 'conf')) - # Load non performance/integration tests + # define the timestamp to be used by logs and results + date = datetime.datetime.fromtimestamp(time.time()) + timestamp = date.strftime('%Y-%m-%d_%H-%M-%S') + settings.setValue('LOG_TIMESTAMP', timestamp) + + # generate results directory name + # integration test use vswitchd log in test step assertions, ensure that + # correct value will be set before loading integration test configuration + results_dir = "results_" + timestamp + results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir) + settings.setValue('RESULTS_PATH', results_path) + # create results directory + if not os.path.exists(results_path): + os.makedirs(results_path) + + # load non performance/integration tests if args['integration']: settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration')) @@ -506,8 +707,8 @@ def main(): settings.setValue('mode', args['mode']) - # set dpdk and ovs paths according to VNF and VSWITCH - if settings.getValue('mode') != 'trafficgen': + # update paths to trafficgens if required + if settings.getValue('mode') == 'trafficgen': functions.settings_update_paths() # if required, handle list-* operations @@ -515,6 +716,9 @@ def main(): configure_logging(settings.getValue('VERBOSITY')) + # CI build support + _LOGGER.info("Creating result directory: %s", results_path) + # check and fix locale check_and_set_locale() @@ -529,7 +733,7 @@ def main(): # configuration validity checks if args['vswitch']: - vswitch_none = 'none' == args['vswitch'].strip().lower() + vswitch_none = args['vswitch'].strip().lower() == 'none' if vswitch_none: settings.setValue('VSWITCH', 'none') else: @@ -558,6 +762,14 @@ def main(): settings.getValue('VNF_DIR')) sys.exit(1) + if args['loadgen']: + loadgens = Loader().get_loadgens() + if args['loadgen'] not in loadgens: + _LOGGER.error('There are no loadgens matching \'%s\' found in' + ' \'%s\'. 
@@ -476,7 +662,7 @@ class MockTestCase(unittest.TestCase):
         on how self.is_pass was set in the constructor"""
         self.assertTrue(self.is_pass, self.msg)
 
-
+# pylint: disable=too-many-locals, too-many-branches, too-many-statements
 def main():
     """Main function.
     """
@@ -486,7 +672,22 @@ def main():
     settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
 
-    # Load non performance/integration tests
+    # define the timestamp to be used by logs and results
+    date = datetime.datetime.fromtimestamp(time.time())
+    timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+    settings.setValue('LOG_TIMESTAMP', timestamp)
+
+    # generate results directory name
+    # integration tests use vswitchd log in test step assertions, ensure that
+    # correct value will be set before loading integration test configuration
+    results_dir = "results_" + timestamp
+    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+    settings.setValue('RESULTS_PATH', results_path)
+    # create results directory
+    if not os.path.exists(results_path):
+        os.makedirs(results_path)
+
+    # load non performance/integration tests
     if args['integration']:
         settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
@@ -506,8 +707,8 @@ def main():
     settings.setValue('mode', args['mode'])
 
-    # set dpdk and ovs paths according to VNF and VSWITCH
-    if settings.getValue('mode') != 'trafficgen':
+    # update paths to trafficgens if required
+    if settings.getValue('mode') == 'trafficgen':
         functions.settings_update_paths()
 
     # if required, handle list-* operations
@@ -515,6 +716,9 @@ def main():
     configure_logging(settings.getValue('VERBOSITY'))
 
+    # CI build support
+    _LOGGER.info("Creating result directory: %s", results_path)
+
     # check and fix locale
     check_and_set_locale()
 
@@ -529,7 +733,7 @@ def main():
     # configuration validity checks
     if args['vswitch']:
-        vswitch_none = 'none' == args['vswitch'].strip().lower()
+        vswitch_none = args['vswitch'].strip().lower() == 'none'
         if vswitch_none:
             settings.setValue('VSWITCH', 'none')
         else:
@@ -558,6 +762,14 @@ def main():
                           settings.getValue('VNF_DIR'))
             sys.exit(1)
 
+    if args['loadgen']:
+        loadgens = Loader().get_loadgens()
+        if args['loadgen'] not in loadgens:
+            _LOGGER.error('There are no loadgens matching \'%s\' found in'
+                          ' \'%s\'. Exiting...', args['loadgen'],
+                          settings.getValue('LOADGEN_DIR'))
+            sys.exit(1)
+
     if args['exact_test_name'] and args['tests']:
         _LOGGER.error("Cannot specify tests with both positional args and --test.")
         sys.exit(1)
@@ -579,36 +791,22 @@ def main():
                              'driver' : networkcard.get_driver(tmp_nic),
                              'device' : networkcard.get_device_name(tmp_nic)})
         else:
-            _LOGGER.error("Invalid network card PCI ID: '%s'", nic)
             vsperf_finalize()
-            raise
+            raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))
 
     settings.setValue('NICS', nic_list)
     # for backward compatibility
     settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
 
-    # generate results directory name
-    date = datetime.datetime.fromtimestamp(time.time())
-    results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
-    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
-    settings.setValue('RESULTS_PATH', results_path)
-
-    # create results directory
-    if not os.path.exists(results_path):
-        _LOGGER.info("Creating result directory: " + results_path)
-        os.makedirs(results_path)
-
+    # pylint: disable=too-many-nested-blocks
     if settings.getValue('mode') == 'trafficgen':
         # execute only traffic generator
         _LOGGER.debug("Executing traffic generator:")
         loader = Loader()
         # set traffic details, so they can be passed to traffic ctl
-        traffic = copy.deepcopy(TRAFFIC_DEFAULTS)
-        traffic.update({'traffic_type': get_test_param('traffic_type', TRAFFIC_DEFAULTS['traffic_type']),
-                        'bidir': get_test_param('bidirectional', TRAFFIC_DEFAULTS['bidir']),
-                        'multistream': int(get_test_param('multistream', TRAFFIC_DEFAULTS['multistream'])),
-                        'stream_type': get_test_param('stream_type', TRAFFIC_DEFAULTS['stream_type']),
-                        'frame_rate': int(get_test_param('iload', TRAFFIC_DEFAULTS['frame_rate']))})
+        traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
+        traffic = functions.check_traffic(traffic)
 
         traffic_ctl = component_factory.create_traffic(
             traffic['traffic_type'],
@@ -631,7 +829,11 @@ def main():
     if args['exact_test_name']:
         exact_names = args['exact_test_name']
         # positional args => exact matches only
-        selected_tests = [test for test in testcases if test['Name'] in exact_names]
+        selected_tests = []
+        for test_name in exact_names:
+            for test in testcases:
+                if test['Name'] == test_name:
+                    selected_tests.append(test)
     elif args['tests']:
         # --tests => apply filter to select requested tests
         selected_tests = apply_filter(testcases, args['tests'])
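Replacing the membership-test comprehension with nested loops changes the selection semantics: positional names now come back in command-line order and may repeat, rather than in configuration-file order with duplicates collapsed. A tiny illustration (test names are examples from the performance suite):

    testcases = [{'Name': 'phy2phy_tput'}, {'Name': 'phy2phy_cont'}]
    exact_names = ['phy2phy_cont', 'phy2phy_tput', 'phy2phy_cont']
    # old: [{'Name': 'phy2phy_tput'}, {'Name': 'phy2phy_cont'}]
    # new: [{'Name': 'phy2phy_cont'}, {'Name': 'phy2phy_tput'}, {'Name': 'phy2phy_cont'}]

That ordering is what lets a per-test _PARAMS_LIST line up with the tests exactly as typed on the command line.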
Done.") vsperf_finalize() sys.exit(1) - # run tests suite = unittest.TestSuite() - for cfg in selected_tests: + settings_snapshot = copy.deepcopy(settings.__dict__) + + for i, cfg in enumerate(selected_tests): + settings.setValue('_TEST_INDEX', i) test_name = cfg.get('Name', '') try: + test_params = settings.getValue('_PARAMS_LIST') + if isinstance(test_params, list): + list_index = i + if i >= len(test_params): + list_index = len(test_params) - 1 + test_params = test_params[list_index] + if settings.getValue('CUMULATIVE_PARAMS'): + test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params) + settings.setValue('TEST_PARAMS', test_params) + if args['integration']: test = IntegrationTestCase(cfg) else: test = PerformanceTestCase(cfg) + test.run() suite.addTest(MockTestCase('', True, test.name)) - #pylint: disable=broad-except + + # pylint: disable=broad-except except (Exception) as ex: _LOGGER.exception("Failed to run test: %s", test_name) suite.addTest(MockTestCase(str(ex), False, test_name)) _LOGGER.info("Continuing with next test...") + finally: + if not settings.getValue('CUMULATIVE_PARAMS'): + settings.restore_from_dict(settings_snapshot) + + settings.restore_from_dict(settings_snapshot) + + + # Generate and printout Performance Matrix + if args['matrix']: + generate_performance_matrix(selected_tests, results_path) # generate final rst report with results of all executed TCs generate_final_report() + + if settings.getValue('XUNIT'): xmlrunner.XMLTestRunner( output=settings.getValue('XUNIT_DIR'), outsuffix="", verbosity=0).run(suite) - if args['opnfvpod']: - pod_name = args['opnfvpod'] - installer_name = settings.getValue('OPNFV_INSTALLER') + if args['opnfvpod'] or settings.getValue('OPNFVPOD'): + pod_name = (args['opnfvpod'] if args['opnfvpod'] else + settings.getValue('OPNFVPOD')) + installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower() opnfv_url = settings.getValue('OPNFV_URL') pkg_list = settings.getValue('PACKAGE_LIST') - int_data = {'vanilla': False, - 'pod': pod_name, + int_data = {'pod': pod_name, + 'build_tag': get_build_tag(), 'installer': installer_name, 'pkg_list': pkg_list, - 'db_url': opnfv_url} - if settings.getValue('VSWITCH').endswith('Vanilla'): - int_data['vanilla'] = True - opnfvdashboard.results2opnfv_dashboard(results_path, int_data) + 'db_url': opnfv_url, + # pass vswitch name from configuration to be used for failed + # TCs; In case of successful TCs it is safer to use vswitch + # name from CSV as TC can override global configuration + 'vswitch': str(settings.getValue('VSWITCH')).lower()} + tc_names = [tc['Name'] for tc in selected_tests] + opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data) # cleanup before exit vsperf_finalize() if __name__ == "__main__": main() -