# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""VSPERF main script.
"""
import argparse
import ast
import copy
import csv
import datetime
import glob
import locale
import logging
import os
import re
import shutil
import subprocess
import sys
import time
import unittest

import xmlrunner
from tabulate import tabulate

from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
from testcases import PerformanceTestCase
from testcases import IntegrationTestCase
from tools import tasks
from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
48 sys.dont_write_bytecode = True
51 'debug': logging.DEBUG,
53 'warning': logging.WARNING,
54 'error': logging.ERROR,
55 'critical': logging.CRITICAL
58 _CURR_DIR = os.path.dirname(os.path.realpath(__file__))
60 _TEMPLATE_RST = {'head' : os.path.join(_CURR_DIR, 'tools/report/report_head.rst'),
61 'foot' : os.path.join(_CURR_DIR, 'tools/report/report_foot.rst'),
62 'final' : 'test_report.rst',
63 'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
66 _TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
67 "The following performance matrix was generated with the results of all the\n"\
68 "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
70 _LOGGER = logging.getLogger()
def parse_param_string(values):
    """
    Parse and split a single '--test-params' argument.

    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
    values. For multiple overrides use a ; separated list for
    e.g. --test-params 'x=z; y=(a,b)'

    :param values: a ';' separated string of key=value overrides
    :return: dict of parsed parameters; bare keys map to True
    """
    results = {}

    if values == '':
        return {}

    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
        param = param.strip()
        value = value.strip()
        if param:
            if value:
                # values are passed inside string from CLI, so we must retype them accordingly
                try:
                    results[param] = ast.literal_eval(value)
                except ValueError:
                    # for backward compatibility, we have to accept strings without quotes
                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
                                    param, str(value))
                    results[param] = str(value)
            else:
                # a key without '=value' is treated as a boolean flag
                results[param] = True
    return results
def parse_arguments():
    """
    Parse command line arguments.

    :return: dict of all CLI arguments (via vars() on the parsed namespace)
    """
    class _SplitTestParamsAction(argparse.Action):
        """
        Parse and split '--test-params' arguments.

        This expects either a single list of ; separated overrides
        as 'x=y', 'x=y,z' or 'x' (implicit true) values.
        e.g. --test-params 'x=z; y=(a,b)'
        Or a list of these ; separated lists with overrides for
        multiple tests.
        e.g. --test-params "['x=z; y=(a,b)','x=z']"
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if values[0] == '[':
                # python-style list literal => one override set per test
                input_list = ast.literal_eval(values)
                parameter_list = []
                for test_params in input_list:
                    parameter_list.append(parse_param_string(test_params))
            else:
                # single override set shared by all tests
                parameter_list = parse_param_string(values)
            results = {'_PARAMS_LIST':parameter_list}
            setattr(namespace, self.dest, results)

    class _ValidateFileAction(argparse.Action):
        """Validate a file can be read from before using it.
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if not os.path.isfile(values):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not a valid path' % values)
            elif not os.access(values, os.R_OK):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not accessible' % values)

            setattr(namespace, self.dest, values)

    class _ValidateDirAction(argparse.Action):
        """Validate a directory can be written to before using it.
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if not os.path.isdir(values):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not a valid path' % values)
            elif not os.access(values, os.W_OK):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not accessible' % values)

            setattr(namespace, self.dest, values)

    def list_logging_levels():
        """Give a summary of all available logging levels.

        :return: List of verbosity level names in decreasing order of
            verbosity
        """
        return sorted(VERBOSITY_LEVELS.keys(),
                      key=lambda x: VERBOSITY_LEVELS[x])

    parser = argparse.ArgumentParser(prog=__file__, formatter_class=
                                     argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 0.2')
    parser.add_argument('--list', '--list-tests', action='store_true',
                        help='list all tests and exit')
    parser.add_argument('--list-trafficgens', action='store_true',
                        help='list all traffic generators and exit')
    parser.add_argument('--list-collectors', action='store_true',
                        help='list all system metrics loggers and exit')
    parser.add_argument('--list-vswitches', action='store_true',
                        help='list all system vswitches and exit')
    parser.add_argument('--list-fwdapps', action='store_true',
                        help='list all system forwarding applications and exit')
    parser.add_argument('--list-vnfs', action='store_true',
                        help='list all system vnfs and exit')
    parser.add_argument('--list-loadgens', action='store_true',
                        help='list all background load generators')
    parser.add_argument('--list-settings', action='store_true',
                        help='list effective settings configuration and exit')
    parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
            tests to run. E.g "vsperf phy2phy_tput phy2phy_cont"\
            runs only the two tests with those exact names.\
            To run all tests omit both positional args and --tests arg.')

    group = parser.add_argument_group('test selection options')
    group.add_argument('-m', '--mode', help='vsperf mode of operation;\
            Values: "normal" - execute vSwitch, VNF and traffic generator;\
            "trafficgen" - execute only traffic generator; "trafficgen-off" \
            - execute vSwitch and VNF; trafficgen-pause - execute vSwitch \
            and VNF but pause before traffic transmission ', default='normal')

    group.add_argument('-f', '--test-spec', help='test specification file')
    group.add_argument('-d', '--test-dir', help='directory containing tests')
    group.add_argument('-t', '--tests', help='Comma-separated list of terms \
            indicating tests to run. e.g. "RFC2544,!p2p" - run all tests whose\
            name contains RFC2544 less those containing "p2p"; "!back2back" - \
            run all tests except those containing back2back')
    group.add_argument('--verbosity', choices=list_logging_levels(),
                       help='debug level')
    group.add_argument('--integration', action='store_true', help='execute integration tests')
    group.add_argument('--trafficgen', help='traffic generator to use')
    group.add_argument('--vswitch', help='vswitch implementation to use')
    group.add_argument('--fwdapp', help='packet forwarding application to use')
    group.add_argument('--vnf', help='vnf to use')
    group.add_argument('--loadgen', help='loadgen to use')
    group.add_argument('--sysmetrics', help='system metrics logger to use')
    group = parser.add_argument_group('test behavior options')
    group.add_argument('--xunit', action='store_true',
                       help='enable xUnit-formatted output')
    group.add_argument('--xunit-dir', action=_ValidateDirAction,
                       help='output directory of xUnit-formatted output')
    group.add_argument('--load-env', action='store_true',
                       help='enable loading of settings from the environment')
    group.add_argument('--conf-file', action=_ValidateFileAction,
                       help='settings file')
    group.add_argument('--test-params', action=_SplitTestParamsAction,
                       help='csv list of test parameters: key=val; e.g. '
                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
                       'GUEST_LOOPBACK=["l2fwd"] ...'
                       ' or a list of csv lists of test parameters: key=val; e.g. '
                       '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
                       '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
    group.add_argument('--opnfvpod', help='name of POD in opnfv')
    group.add_argument('--matrix', help='enable performance matrix analysis',
                       action='store_true', default=False)

    args = vars(parser.parse_args())

    return args
def configure_logging(level):
    """Configure logging.

    Attaches a console handler at the requested verbosity plus per-purpose
    file handlers (default log, host commands, traffic generator commands
    and - optionally - collectd metrics), all at DEBUG level.

    :param level: console verbosity name, a key of VERBOSITY_LEVELS
    """
    timestamp = settings.getValue('LOG_TIMESTAMP')

    def _timestamped_log(settings_key):
        """Return RESULTS_PATH/<base>_<timestamp><ext> for given log setting."""
        name, ext = os.path.splitext(settings.getValue(settings_key))
        renamed = "{name}_{uid}{ex}".format(name=name, uid=timestamp, ex=ext)
        return os.path.join(settings.getValue('RESULTS_PATH'), renamed)

    log_file_default = _timestamped_log('LOG_FILE_DEFAULT')
    log_file_host_cmds = _timestamped_log('LOG_FILE_HOST_CMDS')
    log_file_traffic_gen = _timestamped_log('LOG_FILE_TRAFFIC_GEN')
    metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
                    timestamp + '.log')
    log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
                                          metrics_file)

    # root logger accepts everything; handlers filter by their own level
    _LOGGER.setLevel(logging.DEBUG)

    stream_logger = logging.StreamHandler(sys.stdout)
    stream_logger.setLevel(VERBOSITY_LEVELS[level])
    stream_logger.setFormatter(logging.Formatter(
        '[%(levelname)-5s] %(asctime)s : (%(name)s) - %(message)s'))
    _LOGGER.addHandler(stream_logger)

    file_logger = logging.FileHandler(filename=log_file_default)
    file_logger.setLevel(logging.DEBUG)
    file_logger.setFormatter(logging.Formatter(
        '%(asctime)s : %(message)s'))
    _LOGGER.addHandler(file_logger)

    class CommandFilter(logging.Filter):
        """Filter out strings beginning with 'cmd :'"""
        def filter(self, record):
            return record.getMessage().startswith(tasks.CMD_PREFIX)

    class TrafficGenCommandFilter(logging.Filter):
        """Filter out strings beginning with 'gencmd :'"""
        def filter(self, record):
            return record.getMessage().startswith(trafficgen.CMD_PREFIX)

    class CollectdMetricsFilter(logging.Filter):
        """Filter out strings beginning with 'COLLECTD' :'"""
        def filter(self, record):
            return record.getMessage().startswith('COLLECTD')

    cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
    cmd_logger.setLevel(logging.DEBUG)
    cmd_logger.addFilter(CommandFilter())
    _LOGGER.addHandler(cmd_logger)

    gen_logger = logging.FileHandler(filename=log_file_traffic_gen)
    gen_logger.setLevel(logging.DEBUG)
    gen_logger.addFilter(TrafficGenCommandFilter())
    _LOGGER.addHandler(gen_logger)

    # metrics log is created only when the collectd collector is configured
    if settings.getValue('COLLECTOR') == 'Collectd':
        met_logger = logging.FileHandler(filename=log_file_infra_metrics)
        met_logger.setLevel(logging.DEBUG)
        met_logger.addFilter(CollectdMetricsFilter())
        _LOGGER.addHandler(met_logger)
def apply_filter(tests, tc_filter):
    """Allow a subset of tests to be conveniently selected

    :param tests: The list of Tests from which to select.
    :param tc_filter: A case-insensitive string of comma-separated terms
        indicating the Tests to select.
        e.g. 'RFC' - select all tests whose name contains 'RFC'
        e.g. 'RFC,burst' - select all tests whose name contains 'RFC' or
            'burst'
        e.g. 'RFC,burst,!p2p' - select all tests whose name contains 'RFC'
            or 'burst' and from these remove any containing 'p2p'.
        e.g. '' - empty string selects all tests.
        A None filter also selects all tests.
    :return: A list of the selected Tests.
    """
    # FIX: normalize None BEFORE dereferencing the filter; the original
    # called tc_filter.strip() first, making its None check unreachable.
    if tc_filter is None:
        tc_filter = ""

    # if negative filter is first we have to start with full list of tests.
    # FIX: startswith() avoids IndexError on '' and list() copies 'tests'
    # so later extend()/filtering can never mutate the caller's list.
    if tc_filter.strip().startswith('!'):
        result = list(tests)
    else:
        result = []

    for term in [x.strip() for x in tc_filter.lower().split(",")]:
        if not term or term[0] != '!':
            # Add matching tests from 'tests' into results
            result.extend([test for test in tests
                           if term in test['Name'].lower()])
        else:
            # Term begins with '!' so we remove matching tests
            result = [test for test in result
                      if term[1:] not in test['Name'].lower()]

    return result
def check_and_set_locale():
    """ Function will check locale settings. In case, that it isn't configured
    properly, then default values specified by DEFAULT_LOCALE will be used.
    """
    system_locale = locale.getdefaultlocale()
    # getdefaultlocale() returns (lang, encoding); None in either slot
    # means the locale is not fully configured
    if None in system_locale:
        os.environ['LC_ALL'] = settings.getValue('DEFAULT_LOCALE')
        _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                        system_locale, locale.getdefaultlocale())
def get_vswitch_names(rst_files):
    """ Function will return a list of vSwitches detected in given ``rst_files``.

    :param rst_files: list of rst result files to be searched
    :return: list of detected vSwitch names, or ['vSwitch'] as a fallback
    """
    vswitch_names = set()
    if rst_files:
        try:
            output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
            for line in output:
                match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
                if match:
                    vswitch_names.add(match.group(1))

            if vswitch_names:
                return list(vswitch_names)
        except subprocess.CalledProcessError:
            # grep exits non-zero when nothing matches
            _LOGGER.warning('Cannot detect vSwitches used during testing.')

    # fallback to the default value
    return ['vSwitch']
def get_build_tag():
    """ Function will return a Jenkins job ID environment variable.

    :return: value of the BUILD_TAG environment variable, or 'none'
        when it is not set
    """
    try:
        build_tag = os.environ['BUILD_TAG']
    except KeyError:
        _LOGGER.warning('Cannot detect Jenkins job ID')
        build_tag = 'none'

    return build_tag
def generate_final_report():
    """ Function will check if partial test results are available
    and generates final report in rst format.
    """
    path = settings.getValue('RESULTS_PATH')
    # check if there are any results in rst format
    rst_results = glob.glob(os.path.join(path, 'result*rst'))
    pkt_processors = get_vswitch_names(rst_results)
    if rst_results:
        try:
            test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
            # create report caption directly - it is not worth to execute jinja machinery
            report_caption = '{}\n{} {}\n{}\n\n'.format(
                '============================================================',
                'Performance report for',
                ', '.join(pkt_processors),
                '============================================================')

            with open(_TEMPLATE_RST['tmp'], 'w') as file_:
                file_.write(report_caption)

            # NOTE: shell=True with interpolated paths; inputs come from
            # local configuration, not untrusted callers
            retval = subprocess.call('cat {} {} {} {} > {}'.format(_TEMPLATE_RST['tmp'], _TEMPLATE_RST['head'],
                                                                   ' '.join(rst_results), _TEMPLATE_RST['foot'],
                                                                   test_report), shell=True)
            if retval == 0 and os.path.isfile(test_report):
                _LOGGER.info('Overall test report written to "%s"', test_report)
            else:
                _LOGGER.error('Generation of overall test report has failed.')

            # remove temporary file
            os.remove(_TEMPLATE_RST['tmp'])

        except subprocess.CalledProcessError:
            # FIX: corrected typo 'Generatrion' in the logged message
            _LOGGER.error('Generation of overall test report has failed.')
def generate_performance_matrix(selected_tests, results_path):
    """
    Loads the results of all the currently run tests, compares them
    based on the MATRIX_METRIC, outputs and saves the generated table.
    :selected_tests: list of currently run test
    :results_path: directory path to the results of current tests
    """
    _LOGGER.info('Performance Matrix:')
    test_list = []

    for test in selected_tests:
        test_name = test.get('Name', '<Name not set>')
        test_deployment = test.get('Deployment', '<Deployment not set>')
        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})

    test_params = {}
    output = []
    all_params = settings.getValue('_PARAMS_LIST')
    for i in range(len(selected_tests)):
        test = test_list[i]
        # pick the parameter set for this test; the last set is reused
        # when there are more tests than parameter sets
        if isinstance(all_params, list):
            list_index = i
            if i >= len(all_params):
                list_index = len(all_params) - 1
            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
                test_params.update(all_params[list_index])
            else:
                test_params = all_params[list_index]
        else:
            test_params = all_params
        settings.setValue('TEST_PARAMS', test_params)
        test['test_params'] = copy.deepcopy(test_params)
        try:
            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
                                                      test['test_name'], test['test_deployment'])) as csvfile:
                reader = list(csv.DictReader(csvfile))
                test['csv_data'] = reader[0]
        # pylint: disable=broad-except
        except (Exception) as ex:
            _LOGGER.error("Result file not found: %s", ex)

    metric = settings.getValue('MATRIX_METRIC')
    change = {}
    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
    # the first test is the comparison baseline; without its metric the
    # relative change cannot be computed
    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
        _LOGGER.error("Incorrect format of test results")
        return

    for i, test in enumerate(test_list):
        if test['csv_data']:
            change[i] = float(test['csv_data'][metric])/\
                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
            output.append([i, test['test_name'], float(test['csv_data'][metric]),
                           change[i], str(test['test_params'])[1:-1]])
        else:
            change[i] = 0
            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])

    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
                                                                   tablefmt="rst", floatfmt="0.3f")))
    _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
def enable_sriov(nic_list):
    """ Enable SRIOV for given enhanced PCI IDs

    :param nic_list: A list of enhanced PCI IDs
    :return: True if SRIOV was required and configured, False otherwise
    """
    # detect if sriov is required
    sriov_nic = {}
    for nic in nic_list:
        if networkcard.is_sriov_nic(nic):
            # enhanced PCI ID has the form '<pci_address>|vf<index>'
            tmp_nic = nic.split('|')
            if tmp_nic[0] in sriov_nic:
                # remember the highest VF index requested per PF
                if int(tmp_nic[1][2:]) > sriov_nic[tmp_nic[0]]:
                    sriov_nic[tmp_nic[0]] = int(tmp_nic[1][2:])
            else:
                sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})

    # sriov is required for some NICs
    if sriov_nic:
        for nic in sriov_nic:
            # check if SRIOV is supported and enough virt interfaces are available
            if not networkcard.is_sriov_supported(nic) \
                    or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
                # if not, enable and set appropriate number of VFs
                if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
                    raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
                _LOGGER.debug("SRIOV enabled for NIC %s", nic)

                # ensure that path to the bind tool is valid
                functions.settings_update_paths()

                # WORKAROUND: it has been observed with IXGBE(VF) driver,
                # that NIC doesn't correctly dispatch traffic to VFs based
                # on their MAC address. Unbind and bind to the same driver
                # solves the issue.
                networkcard.reinit_vfs(nic)

        # After SRIOV is enabled it takes some time until network drivers
        # properly initialize all cards.
        # Wait also in case, that SRIOV was already configured as it can be
        # configured automatically just before vsperf execution.
        time.sleep(2)

        return True

    return False
def disable_sriov(nic_list):
    """ Disable SRIOV for given PCI IDs

    :param nic_list: A list of enhanced PCI IDs
    :raise RuntimeError: in case that SRIOV can't be disabled for given NIC
    """
    for nic in nic_list:
        if networkcard.is_sriov_nic(nic):
            # setting numvfs to 0 turns SRIOV off for the PF part of the ID
            if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
                raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
            _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])
def handle_list_options(args):
    """ Process --list cli arguments if needed

    :param args: A dictionary with all CLI arguments

    Each --list-* option prints its listing and terminates the process.
    """
    if args['list_trafficgens']:
        print(Loader().get_trafficgens_printable())
        sys.exit(0)

    if args['list_collectors']:
        print(Loader().get_collectors_printable())
        sys.exit(0)

    if args['list_vswitches']:
        print(Loader().get_vswitches_printable())
        sys.exit(0)

    if args['list_vnfs']:
        print(Loader().get_vnfs_printable())
        sys.exit(0)

    if args['list_fwdapps']:
        print(Loader().get_pktfwds_printable())
        sys.exit(0)

    if args['list_loadgens']:
        print(Loader().get_loadgens_printable())
        sys.exit(0)

    if args['list_settings']:
        print(str(settings))
        sys.exit(0)

    if args['list']:
        list_testcases(args)
        sys.exit(0)
def list_testcases(args):
    """ Print list of testcases requested by --list CLI argument

    :param args: A dictionary with all CLI arguments
    """
    # configure tests
    if args['integration']:
        testcases = settings.getValue('INTEGRATION_TESTS')
    else:
        testcases = settings.getValue('PERFORMANCE_TESTS')

    print("Available Tests:")
    print("================")

    for test in testcases:
        description = functions.format_description(test['Description'], 70)
        if len(test['Name']) < 40:
            # short names share a line with the first description row
            print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
        else:
            print('* {}'.format('{}:'.format(test['Name'])))
            print('  {:40} {}'.format('', description[0]))
        # remaining wrapped description lines are indented under the name
        for i in range(1, len(description)):
            print('  {:40} {}'.format('', description[i]))
def vsperf_finalize():
    """ Clean up before exit
    """
    # remove directory if no result files were created
    try:
        results_path = settings.getValue('RESULTS_PATH')
        if os.path.exists(results_path):
            files_list = os.listdir(results_path)
            if not files_list:
                _LOGGER.info("Removing empty result directory: %s", results_path)
                shutil.rmtree(results_path)
    except AttributeError:
        # skip it if parameter doesn't exist
        pass

    # disable SRIOV if needed
    try:
        if settings.getValue('SRIOV_ENABLED'):
            disable_sriov(settings.getValue('WHITELIST_NICS_ORIG'))
    except AttributeError:
        # skip it if parameter doesn't exist
        pass
class MockTestCase(unittest.TestCase):
    """Allow use of xmlrunner to generate Jenkins compatible output without
    using xmlrunner to actually run tests.

    Usage:
        suite = unittest.TestSuite()
        suite.addTest(MockTestCase('Test1 passed ', True, 'Test1'))
        suite.addTest(MockTestCase('Test2 failed because...', False, 'Test2'))
        xmlrunner.XMLTestRunner(...).run(suite)
    """
    def __init__(self, msg, is_pass, test_name):
        # FIX: store the message; generic_test reads self.msg but the
        # assignment was missing
        self.msg = msg
        self.is_pass = is_pass

        #dynamically create a test method with the right name
        #but point the method at our generic test method
        setattr(MockTestCase, test_name, self.generic_test)

        super(MockTestCase, self).__init__(test_name)

    def generic_test(self):
        """Provide a generic function that raises or not based
        on how self.is_pass was set in the constructor"""
        self.assertTrue(self.is_pass, self.msg)
665 # pylint: disable=too-many-locals, too-many-branches, too-many-statements
# NOTE(review): this span is the body of main() as captured by a numbered
# paste: the leading integers are original file line numbers fused into the
# text, indentation is lost, and many lines are missing (the 'def main():'
# header, sys.exit() calls, else-branches, try/except scaffolding).
# Code is kept byte-identical below; restore the full function from
# upstream VCS before executing.
669 args = parse_arguments()
# -- configuration: load defaults, then CLI dict, conf file, env, and the
# -- CLI dict again so command-line options keep the highest priority
673 settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
675 # Define the timestamp to be used by logs and results
676 date = datetime.datetime.fromtimestamp(time.time())
677 timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
678 settings.setValue('LOG_TIMESTAMP', timestamp)
680 # Load non performance/integration tests
681 if args['integration']:
682 settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
684 # load command line parameters first in case there are settings files
686 settings.load_from_dict(args)
688 if args['conf_file']:
689 settings.load_from_file(args['conf_file'])
692 settings.load_from_env()
694 # reload command line parameters since these should take higher priority
695 # than both a settings file and environment variables
696 settings.load_from_dict(args)
698 settings.setValue('mode', args['mode'])
700 # update paths to trafficgens if required
701 if settings.getValue('mode') == 'trafficgen':
702 functions.settings_update_paths()
704 # if required, handle list-* operations
705 handle_list_options(args)
707 # generate results directory name
708 results_dir = "results_" + timestamp
709 results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
710 settings.setValue('RESULTS_PATH', results_path)
712 # create results directory
713 if not os.path.exists(results_path):
714 os.makedirs(results_path)
716 configure_logging(settings.getValue('VERBOSITY'))
719 _LOGGER.info("Creating result directory: %s", results_path)
721 # check and fix locale
722 check_and_set_locale()
# -- validate CLI-selected components against the available loaders;
# -- elided sys.exit(1) calls presumably followed each _LOGGER.error
724 # configure trafficgens
725 if args['trafficgen']:
726 trafficgens = Loader().get_trafficgens()
727 if args['trafficgen'] not in trafficgens:
728 _LOGGER.error('There are no trafficgens matching \'%s\' found in'
729 ' \'%s\'. Exiting...', args['trafficgen'],
730 settings.getValue('TRAFFICGEN_DIR'))
733 # configuration validity checks
735 vswitch_none = args['vswitch'].strip().lower() == 'none'
737 settings.setValue('VSWITCH', 'none')
739 vswitches = Loader().get_vswitches()
740 if args['vswitch'] not in vswitches:
741 _LOGGER.error('There are no vswitches matching \'%s\' found in'
742 ' \'%s\'. Exiting...', args['vswitch'],
743 settings.getValue('VSWITCH_DIR'))
747 settings.setValue('PKTFWD', args['fwdapp'])
748 fwdapps = Loader().get_pktfwds()
749 if args['fwdapp'] not in fwdapps:
750 _LOGGER.error('There are no forwarding application'
751 ' matching \'%s\' found in'
752 ' \'%s\'. Exiting...', args['fwdapp'],
753 settings.getValue('PKTFWD_DIR'))
757 vnfs = Loader().get_vnfs()
758 if args['vnf'] not in vnfs:
759 _LOGGER.error('there are no vnfs matching \'%s\' found in'
760 ' \'%s\'. exiting...', args['vnf'],
761 settings.getValue('VNF_DIR'))
765 loadgens = Loader().get_loadgens()
766 if args['loadgen'] not in loadgens:
767 _LOGGER.error('There are no loadgens matching \'%s\' found in'
768 ' \'%s\'. Exiting...', args['loadgen'],
769 settings.getValue('LOADGEN_DIR'))
772 if args['exact_test_name'] and args['tests']:
773 _LOGGER.error("Cannot specify tests with both positional args and --test.")
# -- NIC discovery and SRIOV setup on the whitelisted PCI IDs
776 # modify NIC configuration to decode enhanced PCI IDs
777 wl_nics_orig = list(networkcard.check_pci(pci) for pci in settings.getValue('WHITELIST_NICS'))
778 settings.setValue('WHITELIST_NICS_ORIG', wl_nics_orig)
780 # sriov handling is performed on checked/expanded PCI IDs
781 settings.setValue('SRIOV_ENABLED', enable_sriov(wl_nics_orig))
784 for nic in wl_nics_orig:
785 tmp_nic = networkcard.get_nic_info(nic)
787 nic_list.append({'pci' : tmp_nic,
788 'type' : 'vf' if networkcard.get_sriov_pf(tmp_nic) else 'pf',
789 'mac' : networkcard.get_mac(tmp_nic),
790 'driver' : networkcard.get_driver(tmp_nic),
791 'device' : networkcard.get_device_name(tmp_nic)})
794 raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))
796 settings.setValue('NICS', nic_list)
797 # for backward compatibility
798 settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
# -- trafficgen-only mode sends traffic and writes results directly;
# -- otherwise the selected testcases are executed below
801 # pylint: disable=too-many-nested-blocks
802 if settings.getValue('mode') == 'trafficgen':
803 # execute only traffic generator
804 _LOGGER.debug("Executing traffic generator:")
806 # set traffic details, so they can be passed to traffic ctl
807 traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
808 traffic = functions.check_traffic(traffic)
810 traffic_ctl = component_factory.create_traffic(
811 traffic['traffic_type'],
812 loader.get_trafficgen_class())
814 traffic_ctl.send_traffic(traffic)
815 _LOGGER.debug("Traffic Results:")
816 traffic_ctl.print_results()
818 # write results into CSV file
819 result_file = os.path.join(results_path, "result.csv")
820 PerformanceTestCase.write_result_to_file(traffic_ctl.get_results(), result_file)
823 if args['integration']:
824 testcases = settings.getValue('INTEGRATION_TESTS')
826 testcases = settings.getValue('PERFORMANCE_TESTS')
828 if args['exact_test_name']:
829 exact_names = args['exact_test_name']
830 # positional args => exact matches only
832 for test_name in exact_names:
833 for test in testcases:
834 if test['Name'] == test_name:
835 selected_tests.append(test)
837 # --tests => apply filter to select requested tests
838 selected_tests = apply_filter(testcases, args['tests'])
840 # Default - run all tests
841 selected_tests = testcases
843 if not selected_tests:
844 _LOGGER.error("No tests matched --tests option or positional args. Done.")
# -- run each selected test, recording pass/fail in a mock suite so the
# -- optional xUnit output can be generated without rerunning tests
848 suite = unittest.TestSuite()
849 settings_snapshot = copy.deepcopy(settings.__dict__)
851 for i, cfg in enumerate(selected_tests):
852 settings.setValue('_TEST_INDEX', i)
853 test_name = cfg.get('Name', '<Name not set>')
855 test_params = settings.getValue('_PARAMS_LIST')
856 if isinstance(test_params, list):
858 if i >= len(test_params):
859 list_index = len(test_params) - 1
860 test_params = test_params[list_index]
861 if settings.getValue('CUMULATIVE_PARAMS'):
862 test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
863 settings.setValue('TEST_PARAMS', test_params)
865 if args['integration']:
866 test = IntegrationTestCase(cfg)
868 test = PerformanceTestCase(cfg)
871 suite.addTest(MockTestCase('', True, test.name))
873 # pylint: disable=broad-except
874 except (Exception) as ex:
875 _LOGGER.exception("Failed to run test: %s", test_name)
876 suite.addTest(MockTestCase(str(ex), False, test_name))
877 _LOGGER.info("Continuing with next test...")
879 if not settings.getValue('CUMULATIVE_PARAMS'):
880 settings.restore_from_dict(settings_snapshot)
882 settings.restore_from_dict(settings_snapshot)
885 # Generate and printout Performance Matrix
887 generate_performance_matrix(selected_tests, results_path)
889 # generate final rst report with results of all executed TCs
890 generate_final_report()
# -- optional xUnit output and OPNFV dashboard upload
894 if settings.getValue('XUNIT'):
895 xmlrunner.XMLTestRunner(
896 output=settings.getValue('XUNIT_DIR'), outsuffix="",
897 verbosity=0).run(suite)
899 if args['opnfvpod'] or settings.getValue('OPNFVPOD'):
900 pod_name = (args['opnfvpod'] if args['opnfvpod'] else
901 settings.getValue('OPNFVPOD'))
902 installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
903 opnfv_url = settings.getValue('OPNFV_URL')
904 pkg_list = settings.getValue('PACKAGE_LIST')
906 int_data = {'pod': pod_name,
907 'build_tag': get_build_tag(),
908 'installer': installer_name,
909 'pkg_list': pkg_list,
911 # pass vswitch name from configuration to be used for failed
912 # TCs; In case of successful TCs it is safer to use vswitch
913 # name from CSV as TC can override global configuration
914 'vswitch': str(settings.getValue('VSWITCH')).lower()}
915 tc_names = [tc['Name'] for tc in selected_tests]
916 opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
918 # cleanup before exit
921 if __name__ == "__main__":