Merge "Support: Container Metrics collection and Visualization"
[vswitchperf.git] / vsperf
1 #!/usr/bin/env python3
2
3 # Copyright 2015-2017 Intel Corporation.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 #   http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """VSPERF main script.
18 """
19
20 import logging
21 import os
22 import sys
23 import argparse
24 import re
25 import time
26 import csv
27 import datetime
28 import shutil
29 import unittest
30 import locale
31 import copy
32 import glob
33 import subprocess
34 import ast
35 import xmlrunner
36 from tabulate import tabulate
37 from conf import merge_spec
38 from conf import settings
39 import core.component_factory as component_factory
40 from core.loader import Loader
41 from testcases import PerformanceTestCase
42 from testcases import IntegrationTestCase
43 from tools import tasks
44 from tools import networkcard
45 from tools import functions
46 from tools.pkt_gen import trafficgen
47 from tools.opnfvdashboard import opnfvdashboard
48 sys.dont_write_bytecode = True
49
50 VERBOSITY_LEVELS = {
51     'debug': logging.DEBUG,
52     'info': logging.INFO,
53     'warning': logging.WARNING,
54     'error': logging.ERROR,
55     'critical': logging.CRITICAL
56 }
57
58 _CURR_DIR = os.path.dirname(os.path.realpath(__file__))
59
60 _TEMPLATE_RST = {'head'  : os.path.join(_CURR_DIR, 'tools/report/report_head.rst'),
61                  'foot'  : os.path.join(_CURR_DIR, 'tools/report/report_foot.rst'),
62                  'final' : 'test_report.rst',
63                  'tmp'   : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
64                 }
65
66 _TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
67                    "The following performance matrix was generated from the results of all the\n"\
68                    "tests run in this session. The metric used for comparison is {}.\n\n{}\n\n"
69
70 _LOGGER = logging.getLogger()
71
72 def parse_param_string(values):
73     """
74     Parse and split a single '--test-params' argument.
75
76     This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
77     values. For multiple overrides use a ';' separated list,
78     e.g. --test-params 'x=z; y=(a,b)'
79     """
80     results = {}
81
82     if values == '':
83         return {}
84
85     for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
86         param = param.strip()
87         value = value.strip()
88         if param:
89             if value:
90                 # values are passed inside string from CLI, so we must retype them accordingly
91                 try:
92                     results[param] = ast.literal_eval(value)
93                 except (ValueError, SyntaxError):
94                     # for backward compatibility, we have to accept strings without quotes
95                     _LOGGER.warning("Adding missing quotes around string value: %s = %s",
96                                     param, str(value))
97                     results[param] = str(value)
98             else:
99                 results[param] = True
100     return results
101
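# Usage sketch (illustrative): parse_param_string() splits ';' separated
# overrides and retypes values via ast.literal_eval, e.g.
#   parse_param_string("TRAFFICGEN_DURATION=10; GUEST_LOOPBACK=['l2fwd']; DEBUG")
# should yield
#   {'TRAFFICGEN_DURATION': 10, 'GUEST_LOOPBACK': ['l2fwd'], 'DEBUG': True}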
102
103 def parse_arguments():
104     """
105     Parse command line arguments.
106     """
107     class _SplitTestParamsAction(argparse.Action):
108         """
109         Parse and split '--test-params' arguments.
110
111         This expects either a single ';' separated list of overrides
112         given as 'x=y', 'x=y,z' or 'x' (implicit true) values,
113         e.g. --test-params 'x=z; y=(a,b)',
114         or a list of such ';' separated lists with overrides for
115         multiple tests,
116         e.g. --test-params "['x=z; y=(a,b)','x=z']"
117         """
118         def __call__(self, parser, namespace, values, option_string=None):
119             if values.startswith('['):
120                 input_list = ast.literal_eval(values)
121                 parameter_list = []
122                 for test_params in input_list:
123                     parameter_list.append(parse_param_string(test_params))
124             else:
125                 parameter_list = parse_param_string(values)
126             results = {'_PARAMS_LIST':parameter_list}
127             setattr(namespace, self.dest, results)
128
129     class _ValidateFileAction(argparse.Action):
130         """Validate a file can be read from before using it.
131         """
132         def __call__(self, parser, namespace, values, option_string=None):
133             if not os.path.isfile(values):
134                 raise argparse.ArgumentTypeError(
135                     'the path \'%s\' is not a valid path' % values)
136             elif not os.access(values, os.R_OK):
137                 raise argparse.ArgumentTypeError(
138                     'the path \'%s\' is not accessible' % values)
139
140             setattr(namespace, self.dest, values)
141
142     class _ValidateDirAction(argparse.Action):
143         """Validate a directory can be written to before using it.
144         """
145         def __call__(self, parser, namespace, values, option_string=None):
146             if not os.path.isdir(values):
147                 raise argparse.ArgumentTypeError(
148                     'the path \'%s\' is not a valid path' % values)
149             elif not os.access(values, os.W_OK):
150                 raise argparse.ArgumentTypeError(
151                     'the path \'%s\' is not accessible' % values)
152
153             setattr(namespace, self.dest, values)
154
155     def list_logging_levels():
156         """Give a summary of all available logging levels.
157
158         :return: List of verbosity level names in decreasing order of
159             verbosity
160         """
161         return sorted(VERBOSITY_LEVELS.keys(),
162                       key=lambda x: VERBOSITY_LEVELS[x])
163
164     parser = argparse.ArgumentParser(prog=__file__, formatter_class=
165                                      argparse.ArgumentDefaultsHelpFormatter)
166     parser.add_argument('--version', action='version', version='%(prog)s 0.2')
167     parser.add_argument('--list', '--list-tests', action='store_true',
168                         help='list all tests and exit')
169     parser.add_argument('--list-trafficgens', action='store_true',
170                         help='list all traffic generators and exit')
171     parser.add_argument('--list-collectors', action='store_true',
172                         help='list all system metrics loggers and exit')
173     parser.add_argument('--list-vswitches', action='store_true',
174                         help='list all system vswitches and exit')
175     parser.add_argument('--list-fwdapps', action='store_true',
176                         help='list all system forwarding applications and exit')
177     parser.add_argument('--list-vnfs', action='store_true',
178                         help='list all system vnfs and exit')
179     parser.add_argument('--list-loadgens', action='store_true',
180                         help='list all background load generators and exit')
181     parser.add_argument('--list-settings', action='store_true',
182                         help='list effective settings configuration and exit')
183     parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
184             tests to run. E.g "vsperf phy2phy_tput phy2phy_cont"\
185             runs only the two tests with those exact names.\
186             To run all tests omit both positional args and --tests arg.')
187
188     group = parser.add_argument_group('test selection options')
189     group.add_argument('-m', '--mode', help='vsperf mode of operation;\
190             Values: "normal" - execute vSwitch, VNF and traffic generator;\
191             "trafficgen" - execute only traffic generator; "trafficgen-off" \
192             - execute vSwitch and VNF; "trafficgen-pause" - execute vSwitch \
193             and VNF but pause before traffic transmission', default='normal')
194
195     group.add_argument('-f', '--test-spec', help='test specification file')
196     group.add_argument('-d', '--test-dir', help='directory containing tests')
197     group.add_argument('-t', '--tests', help='Comma-separated list of terms \
198             indicating tests to run. e.g. "RFC2544,!p2p" - run all tests whose\
199             name contains RFC2544 less those containing "p2p"; "!back2back" - \
200             run all tests except those containing back2back')
201     group.add_argument('--verbosity', choices=list_logging_levels(),
202                        help='console log verbosity level')
203     group.add_argument('--integration', action='store_true', help='execute integration tests')
204     group.add_argument('--trafficgen', help='traffic generator to use')
205     group.add_argument('--vswitch', help='vswitch implementation to use')
206     group.add_argument('--fwdapp', help='packet forwarding application to use')
207     group.add_argument('--vnf', help='vnf to use')
208     group.add_argument('--loadgen', help='loadgen to use')
209     group.add_argument('--sysmetrics', help='system metrics logger to use')
210     group = parser.add_argument_group('test behavior options')
211     group.add_argument('--xunit', action='store_true',
212                        help='enable xUnit-formatted output')
213     group.add_argument('--xunit-dir', action=_ValidateDirAction,
214                        help='output directory of xUnit-formatted output')
215     group.add_argument('--load-env', action='store_true',
216                        help='enable loading of settings from the environment')
217     group.add_argument('--conf-file', action=_ValidateFileAction,
218                        help='settings file')
219     group.add_argument('--test-params', action=_SplitTestParamsAction,
220                        help='semicolon separated list of test parameters (key=val); e.g. '
221                        'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
222                        'GUEST_LOOPBACK=["l2fwd"] ...'
223                        ' or a list of such semicolon separated lists, one per test; e.g. '
224                        '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
225                        '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
226     group.add_argument('--opnfvpod', help='name of POD in opnfv')
227     group.add_argument('--matrix', help='enable performance matrix analysis',
228                        action='store_true', default=False)
229
230     args = vars(parser.parse_args())
231
232     return args
233
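# CLI usage sketches (illustrative, based on the options defined above):
#   vsperf phy2phy_tput phy2phy_cont           # run two tests by exact name
#   vsperf --tests 'RFC2544,!back2back'        # term based test selection
#   vsperf --mode trafficgen --test-params 'TRAFFICGEN_DURATION=30'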
234
235 def configure_logging(level):
236     """Configure logging.
237     """
238     name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
239     rename_default = "{name}_{uid}{ex}".format(name=name,
240                                                uid=settings.getValue(
241                                                    'LOG_TIMESTAMP'),
242                                                ex=ext)
243     log_file_default = os.path.join(
244         settings.getValue('RESULTS_PATH'), rename_default)
245     name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
246     rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
247                                                uid=settings.getValue(
248                                                    'LOG_TIMESTAMP'),
249                                                ex=ext)
250     log_file_host_cmds = os.path.join(
251         settings.getValue('RESULTS_PATH'), rename_hostcmd)
252     name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
253     rename_traffic = "{name}_{uid}{ex}".format(name=name,
254                                                uid=settings.getValue(
255                                                    'LOG_TIMESTAMP'),
256                                                ex=ext)
257     log_file_traffic_gen = os.path.join(
258         settings.getValue('RESULTS_PATH'), rename_traffic)
259     metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
260                     settings.getValue('LOG_TIMESTAMP') + '.log')
261     log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
262                                           metrics_file)
263
264     _LOGGER.setLevel(logging.DEBUG)
265
266     stream_logger = logging.StreamHandler(sys.stdout)
267     stream_logger.setLevel(VERBOSITY_LEVELS[level])
268     stream_logger.setFormatter(logging.Formatter(
269         '[%(levelname)-5s]  %(asctime)s : (%(name)s) - %(message)s'))
270     _LOGGER.addHandler(stream_logger)
271
272     file_logger = logging.FileHandler(filename=log_file_default)
273     file_logger.setLevel(logging.DEBUG)
274     file_logger.setFormatter(logging.Formatter(
275         '%(asctime)s : %(message)s'))
276     _LOGGER.addHandler(file_logger)
277
278     class CommandFilter(logging.Filter):
279         """Pass only log records whose message begins with 'cmd :'"""
280         def filter(self, record):
281             return record.getMessage().startswith(tasks.CMD_PREFIX)
282
283     class TrafficGenCommandFilter(logging.Filter):
284         """Pass only log records whose message begins with 'gencmd :'"""
285         def filter(self, record):
286             return record.getMessage().startswith(trafficgen.CMD_PREFIX)
287
288     class CollectdMetricsFilter(logging.Filter):
289         """Pass only log records whose message begins with 'COLLECTD'"""
290         def filter(self, record):
291             return record.getMessage().startswith('COLLECTD')
292
293     cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
294     cmd_logger.setLevel(logging.DEBUG)
295     cmd_logger.addFilter(CommandFilter())
296     _LOGGER.addHandler(cmd_logger)
297
298     gen_logger = logging.FileHandler(filename=log_file_traffic_gen)
299     gen_logger.setLevel(logging.DEBUG)
300     gen_logger.addFilter(TrafficGenCommandFilter())
301     _LOGGER.addHandler(gen_logger)
302
303     if settings.getValue('COLLECTOR') == 'Collectd':
304         met_logger = logging.FileHandler(filename=log_file_infra_metrics)
305         met_logger.setLevel(logging.DEBUG)
306         met_logger.addFilter(CollectdMetricsFilter())
307         _LOGGER.addHandler(met_logger)
308
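# Summary of the logging layout configured above: the root logger gets a console
# handler at the chosen --verbosity level plus per-purpose file handlers at DEBUG
# level; CommandFilter, TrafficGenCommandFilter and CollectdMetricsFilter route
# host command, traffic generator and collectd metric records into separate files
# (the collectd metrics file is added only when COLLECTOR is set to 'Collectd').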
309
310 def apply_filter(tests, tc_filter):
311     """Allow a subset of tests to be conveniently selected
312
313     :param tests: The list of Tests from which to select.
314     :param tc_filter: A case-insensitive string of comma-separated terms
315         indicating the Tests to select.
316         e.g. 'RFC' - select all tests whose name contains 'RFC'
317         e.g. 'RFC,burst' - select all tests whose name contains 'RFC' or
318             'burst'
319         e.g. 'RFC,burst,!p2p' - select all tests whose name contains 'RFC'
320             or 'burst' and from these remove any containing 'p2p'.
321         e.g. '' - empty string selects all tests.
322     :return: A list of the selected Tests.
323     """
324     if tc_filter is None:
325         tc_filter = ""
326     # if the first filter term is negative, start with the full list of tests
327     if tc_filter.strip().startswith('!'):
328         result = tests
329     else:
330         result = []
331
332     for term in [x.strip() for x in tc_filter.lower().split(",")]:
333         if not term or term[0] != '!':
334             # Add matching tests from 'tests' into results
335             result.extend([test for test in tests \
336                 if test['Name'].lower().find(term) >= 0])
337         else:
338             # Term begins with '!' so we remove matching tests
339             result = [test for test in result \
340                 if test['Name'].lower().find(term[1:]) < 0]
341
342     return result
343
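# Usage sketch (illustrative): with the default performance test names,
#   apply_filter(testcases, 'RFC2544,!p2p')
# keeps every test whose 'Name' contains 'rfc2544' (matching is case
# insensitive) and then drops any of those whose name contains 'p2p'.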
344
345 def check_and_set_locale():
346     """ Function will check locale settings. If the locale is not configured
347     properly, the default values specified by DEFAULT_LOCALE will be used.
348     """
349
350     system_locale = locale.getdefaultlocale()
351     if None in system_locale:
352         os.environ['LC_ALL'] = settings.getValue('DEFAULT_LOCALE')
353         _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
354                         system_locale, locale.getdefaultlocale())
355
356 def get_vswitch_names(rst_files):
357     """ Function will return a list of vSwitches detected in the given ``rst_files``.
358     """
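    # Assumption about the result file format: each rst result file is expected
    # to contain a line such as '* vSwitch: OvsDpdkVhost, <details>'; the name
    # before the first comma is collected, otherwise the generic 'vSwitch' is
    # returned as a fallback.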
359     vswitch_names = set()
360     if rst_files:
361         try:
362             output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
363             for line in output:
364                 match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
365                 if match:
366                     vswitch_names.add(match.group(1))
367
368             if vswitch_names:
369                 return list(vswitch_names)
370
371         except subprocess.CalledProcessError:
372             _LOGGER.warning('Cannot detect vSwitches used during testing.')
373
374     # fallback to the default value
375     return ['vSwitch']
376
377 def get_build_tag():
378     """ Function will return the Jenkins job ID from the BUILD_TAG environment variable.
379     """
380
381     try:
382         build_tag = os.environ['BUILD_TAG']
383
384     except KeyError:
385         _LOGGER.warning('Cannot detect Jenkins job ID')
386         build_tag = "none"
387
388     return build_tag
389
390 def generate_final_report():
391     """ Function will check if partial test results are available
392     and generate the final report in rst format.
393     """
394
395     path = settings.getValue('RESULTS_PATH')
396     # check if there are any results in rst format
397     rst_results = glob.glob(os.path.join(path, 'result*rst'))
398     pkt_processors = get_vswitch_names(rst_results)
399     if rst_results:
400         try:
401             test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
402             # create report caption directly - it is not worth executing the jinja machinery
403             report_caption = '{}\n{} {}\n{}\n\n'.format(
404                 '============================================================',
405                 'Performance report for',
406                 ', '.join(pkt_processors),
407                 '============================================================')
408
409             with open(_TEMPLATE_RST['tmp'], 'w') as file_:
410                 file_.write(report_caption)
411
412             retval = subprocess.call('cat {} {} {} {} > {}'.format(_TEMPLATE_RST['tmp'], _TEMPLATE_RST['head'],
413                                                                    ' '.join(rst_results), _TEMPLATE_RST['foot'],
414                                                                    test_report), shell=True)
415             if retval == 0 and os.path.isfile(test_report):
416                 _LOGGER.info('Overall test report written to "%s"', test_report)
417             else:
418                 _LOGGER.error('Generation of overall test report has failed.')
419
420             # remove temporary file
421             os.remove(_TEMPLATE_RST['tmp'])
422
423         except subprocess.CalledProcessError:
424             _LOGGER.error('Generation of overall test report has failed.')
425
426
427 def generate_performance_matrix(selected_tests, results_path):
428     """
429     Loads the results of all the currently run tests, compares them
430     based on the MATRIX_METRIC, outputs and saves the generated table.
431     :param selected_tests: list of currently run tests
432     :param results_path: directory path to the results of the current tests
433     """
434     _LOGGER.info('Performance Matrix:')
435     test_list = []
436
437     for test in selected_tests:
438         test_name = test.get('Name', '<Name not set>')
439         test_deployment = test.get('Deployment', '<Deployment not set>')
440         test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
441
442     test_params = {}
443     output = []
444     all_params = settings.getValue('_PARAMS_LIST')
445     for i in range(len(selected_tests)):
446         test = test_list[i]
447         if isinstance(all_params, list):
448             list_index = i
449             if i >= len(all_params):
450                 list_index = len(all_params) - 1
451             if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
452                 test_params.update(all_params[list_index])
453             else:
454                 test_params = all_params[list_index]
455         else:
456             test_params = all_params
457         settings.setValue('TEST_PARAMS', test_params)
458         test['test_params'] = copy.deepcopy(test_params)
459         try:
460             with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
461                                                       test['test_name'], test['test_deployment'])) as csvfile:
462                 reader = list(csv.DictReader(csvfile))
463                 test['csv_data'] = reader[0]
464         # pylint: disable=broad-except
465         except (Exception) as ex:
466             _LOGGER.error("Result file not found: %s", ex)
467
468     metric = settings.getValue('MATRIX_METRIC')
469     change = {}
470     output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
471                      "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
472     if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
473         _LOGGER.error("Incorrect format of test results")
474         return
475     for i, test in enumerate(test_list):
476         if test['csv_data']:
477             change[i] = float(test['csv_data'][metric])/\
478                         (float(test_list[0]['csv_data'][metric]) / 100) - 100
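            # i.e. the percentage difference of the metric relative to the
            # first test: change[i] = 100 * (metric[i] - metric[0]) / metric[0]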
479             output.append([i, test['test_name'], float(test['csv_data'][metric]),
480                            change[i], str(test['test_params'])[1:-1]])
481         else:
482             change[i] = 0
483             output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
484     print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
485     with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
486         output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
487                                                                    tablefmt="rst", floatfmt="0.3f")))
488         _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
489
490 def enable_sriov(nic_list):
491     """ Enable SRIOV for given enhanced PCI IDs
492
493     :param nic_list: A list of enhanced PCI IDs
494     """
495     # detect if sriov is required
496     sriov_nic = {}
497     for nic in nic_list:
498         if networkcard.is_sriov_nic(nic):
499             tmp_nic = nic.split('|')
500             if tmp_nic[0] in sriov_nic:
501                 if int(tmp_nic[1][2:]) > sriov_nic[tmp_nic[0]]:
502                     sriov_nic[tmp_nic[0]] = int(tmp_nic[1][2:])
503             else:
504                 sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
505
506     # sriov is required for some NICs
507     if sriov_nic:
508         for nic in sriov_nic:
509             # check if SRIOV is supported and enough virt interfaces are available
510             if not networkcard.is_sriov_supported(nic) \
511                 or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
512                 # if not, enable and set appropriate number of VFs
513                 if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
514                     raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
515                 else:
516                     _LOGGER.debug("SRIOV enabled for NIC %s", nic)
517
518                 # ensure that path to the bind tool is valid
519                 functions.settings_update_paths()
520
521                 # WORKAROUND: it has been observed with IXGBE(VF) driver,
522                 # that NIC doesn't correctly dispatch traffic to VFs based
523                 # on their MAC address. Unbind and bind to the same driver
524                 # solves this issue.
525                 networkcard.reinit_vfs(nic)
526
527         # After SRIOV is enabled it takes some time until network drivers
528         # properly initialize all cards.
529         # Wait even if SRIOV was already configured, as it can be
530         # configured automatically just before vsperf execution.
531         time.sleep(2)
532
533         return True
534
535     return False
536
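# Usage sketch for enable_sriov() (illustrative; assumes the enhanced PCI ID
# format accepted by networkcard.is_sriov_nic(), e.g. '0000:05:00.0|vf1'):
#   enable_sriov(['0000:05:00.0|vf1']) ensures PF 0000:05:00.0 exposes at least
#   two VFs (vf index + 1), enabling and re-initializing them if needed, and
#   returns True; for plain PCI IDs nothing is changed and False is returned.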
537
538 def disable_sriov(nic_list):
539     """ Disable SRIOV for given PCI IDs
540
541     :param nic_list: A list of enhanced PCI IDs
542     """
543     for nic in nic_list:
544         if networkcard.is_sriov_nic(nic):
545             if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
546                 raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
547             else:
548                 _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])
549
550
551 def handle_list_options(args):
552     """ Process --list cli arguments if needed
553
554     :param args: A dictionary with all CLI arguments
555     """
556     if args['list_trafficgens']:
557         print(Loader().get_trafficgens_printable())
558         sys.exit(0)
559
560     if args['list_collectors']:
561         print(Loader().get_collectors_printable())
562         sys.exit(0)
563
564     if args['list_vswitches']:
565         print(Loader().get_vswitches_printable())
566         sys.exit(0)
567
568     if args['list_vnfs']:
569         print(Loader().get_vnfs_printable())
570         sys.exit(0)
571
572     if args['list_fwdapps']:
573         print(Loader().get_pktfwds_printable())
574         sys.exit(0)
575
576     if args['list_loadgens']:
577         print(Loader().get_loadgens_printable())
578         sys.exit(0)
579
580     if args['list_settings']:
581         print(str(settings))
582         sys.exit(0)
583
584     if args['list']:
585         list_testcases(args)
586         sys.exit(0)
587
588
589 def list_testcases(args):
590     """ Print list of testcases requested by --list CLI argument
591
592     :param args: A dictionary with all CLI arguments
593     """
594     # configure tests
595     if args['integration']:
596         testcases = settings.getValue('INTEGRATION_TESTS')
597     else:
598         testcases = settings.getValue('PERFORMANCE_TESTS')
599
600     print("Available Tests:")
601     print("================")
602
603     for test in testcases:
604         description = functions.format_description(test['Description'], 70)
605         if len(test['Name']) < 40:
606             print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
607         else:
608             print('* {}'.format('{}:'.format(test['Name'])))
609             print('  {:40} {}'.format('', description[0]))
610         for i in range(1, len(description)):
611             print('  {:40} {}'.format('', description[i]))
612
613
614 def vsperf_finalize():
615     """ Clean up before exit
616     """
617     # remove directory if no result files were created
618     try:
619         results_path = settings.getValue('RESULTS_PATH')
620         if os.path.exists(results_path):
621             files_list = os.listdir(results_path)
622             if not files_list:
623                 _LOGGER.info("Removing empty result directory: %s", results_path)
624                 shutil.rmtree(results_path)
625     except AttributeError:
626         # skip it if parameter doesn't exist
627         pass
628
629     # disable SRIOV if needed
630     try:
631         if settings.getValue('SRIOV_ENABLED'):
632             disable_sriov(settings.getValue('WHITELIST_NICS_ORIG'))
633     except AttributeError:
634         # skip it if parameter doesn't exist
635         pass
636
637
638 class MockTestCase(unittest.TestCase):
639     """Allow use of xmlrunner to generate Jenkins compatible output without
640     using xmlrunner to actually run tests.
641
642     Usage:
643         suite = unittest.TestSuite()
644         suite.addTest(MockTestCase('Test1 passed ', True, 'Test1'))
645         suite.addTest(MockTestCase('Test2 failed because...', False, 'Test2'))
646         xmlrunner.XMLTestRunner(...).run(suite)
647     """
648
649     def __init__(self, msg, is_pass, test_name):
650         # remember the message and the expected outcome
651         self.msg = msg
652         self.is_pass = is_pass
653
654         # dynamically create a test method with the right name,
655         # but point the method at our generic test method
656         setattr(MockTestCase, test_name, self.generic_test)
657
658         super(MockTestCase, self).__init__(test_name)
659
660     def generic_test(self):
661         """Provide a generic function that raises or not based
662         on how self.is_pass was set in the constructor"""
663         self.assertTrue(self.is_pass, self.msg)
664
665 # pylint: disable=too-many-locals, too-many-branches, too-many-statements
666 def main():
667     """Main function.
668     """
669     args = parse_arguments()
670
671     # configure settings
672
673     settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
674
675     # define the timestamp to be used by logs and results
676     date = datetime.datetime.fromtimestamp(time.time())
677     timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
678     settings.setValue('LOG_TIMESTAMP', timestamp)
679
680     # generate results directory name
681     # integration tests use the vswitchd log in test step assertions; ensure that
682     # the correct value is set before loading the integration test configuration
683     results_dir = "results_" + timestamp
684     results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
685     settings.setValue('RESULTS_PATH', results_path)
686     # create results directory
687     if not os.path.exists(results_path):
688         os.makedirs(results_path)
689
690     # load integration test configuration if requested
691     if args['integration']:
692         settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
693
694     # load command line parameters first in case there are settings files
695     # to be used
696     settings.load_from_dict(args)
697
698     if args['conf_file']:
699         settings.load_from_file(args['conf_file'])
700
701     if args['load_env']:
702         settings.load_from_env()
703
704     # reload command line parameters since these should take higher priority
705     # than both a settings file and environment variables
706     settings.load_from_dict(args)
707
708     settings.setValue('mode', args['mode'])
709
710     # update paths to trafficgens if required
711     if settings.getValue('mode') == 'trafficgen':
712         functions.settings_update_paths()
713
714     # if required, handle list-* operations
715     handle_list_options(args)
716
717     configure_logging(settings.getValue('VERBOSITY'))
718
719     # CI build support
720     _LOGGER.info("Creating result directory: %s", results_path)
721
722     # check and fix locale
723     check_and_set_locale()
724
725     # configure trafficgens
726     if args['trafficgen']:
727         trafficgens = Loader().get_trafficgens()
728         if args['trafficgen'] not in trafficgens:
729             _LOGGER.error('There are no trafficgens matching \'%s\' found in'
730                           ' \'%s\'. Exiting...', args['trafficgen'],
731                           settings.getValue('TRAFFICGEN_DIR'))
732             sys.exit(1)
733
734     # configuration validity checks
735     if args['vswitch']:
736         vswitch_none = args['vswitch'].strip().lower() == 'none'
737         if vswitch_none:
738             settings.setValue('VSWITCH', 'none')
739         else:
740             vswitches = Loader().get_vswitches()
741             if args['vswitch'] not in vswitches:
742                 _LOGGER.error('There are no vswitches matching \'%s\' found in'
743                               ' \'%s\'. Exiting...', args['vswitch'],
744                               settings.getValue('VSWITCH_DIR'))
745                 sys.exit(1)
746
747     if args['fwdapp']:
748         settings.setValue('PKTFWD', args['fwdapp'])
749         fwdapps = Loader().get_pktfwds()
750         if args['fwdapp'] not in fwdapps:
751             _LOGGER.error('There are no forwarding applications'
752                           ' matching \'%s\' found in'
753                           ' \'%s\'. Exiting...', args['fwdapp'],
754                           settings.getValue('PKTFWD_DIR'))
755             sys.exit(1)
756
757     if args['vnf']:
758         vnfs = Loader().get_vnfs()
759         if args['vnf'] not in vnfs:
760             _LOGGER.error('There are no vnfs matching \'%s\' found in'
761                           ' \'%s\'. Exiting...', args['vnf'],
762                           settings.getValue('VNF_DIR'))
763             sys.exit(1)
764
765     if args['loadgen']:
766         loadgens = Loader().get_loadgens()
767         if args['loadgen'] not in loadgens:
768             _LOGGER.error('There are no loadgens matching \'%s\' found in'
769                           ' \'%s\'. Exiting...', args['loadgen'],
770                           settings.getValue('LOADGEN_DIR'))
771             sys.exit(1)
772
773     if args['exact_test_name'] and args['tests']:
774         _LOGGER.error("Cannot specify tests with both positional args and --tests.")
775         sys.exit(1)
776
777     # modify NIC configuration to decode enhanced PCI IDs
778     wl_nics_orig = list(networkcard.check_pci(pci) for pci in settings.getValue('WHITELIST_NICS'))
779     settings.setValue('WHITELIST_NICS_ORIG', wl_nics_orig)
780
781     # sriov handling is performed on checked/expanded PCI IDs
782     settings.setValue('SRIOV_ENABLED', enable_sriov(wl_nics_orig))
783
784     nic_list = []
785     for nic in wl_nics_orig:
786         tmp_nic = networkcard.get_nic_info(nic)
787         if tmp_nic:
788             nic_list.append({'pci' : tmp_nic,
789                              'type' : 'vf' if networkcard.get_sriov_pf(tmp_nic) else 'pf',
790                              'mac' : networkcard.get_mac(tmp_nic),
791                              'driver' : networkcard.get_driver(tmp_nic),
792                              'device' : networkcard.get_device_name(tmp_nic)})
793         else:
794             vsperf_finalize()
795             raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))
796
797     settings.setValue('NICS', nic_list)
798     # for backward compatibility
799     settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
800
801
802     # pylint: disable=too-many-nested-blocks
803     if settings.getValue('mode') == 'trafficgen':
804         # execute only traffic generator
805         _LOGGER.debug("Executing traffic generator:")
806         loader = Loader()
807         # set traffic details, so they can be passed to traffic ctl
808         traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
809         traffic = functions.check_traffic(traffic)
810
811         traffic_ctl = component_factory.create_traffic(
812             traffic['traffic_type'],
813             loader.get_trafficgen_class())
814         with traffic_ctl:
815             traffic_ctl.send_traffic(traffic)
816         _LOGGER.debug("Traffic Results:")
817         traffic_ctl.print_results()
818
819         # write results into CSV file
820         result_file = os.path.join(results_path, "result.csv")
821         PerformanceTestCase.write_result_to_file(traffic_ctl.get_results(), result_file)
822     else:
823         # configure tests
824         if args['integration']:
825             testcases = settings.getValue('INTEGRATION_TESTS')
826         else:
827             testcases = settings.getValue('PERFORMANCE_TESTS')
828
829         if args['exact_test_name']:
830             exact_names = args['exact_test_name']
831             # positional args => exact matches only
832             selected_tests = []
833             for test_name in exact_names:
834                 for test in testcases:
835                     if test['Name'] == test_name:
836                         selected_tests.append(test)
837         elif args['tests']:
838             # --tests => apply filter to select requested tests
839             selected_tests = apply_filter(testcases, args['tests'])
840         else:
841             # Default - run all tests
842             selected_tests = testcases
843
844         if not selected_tests:
845             _LOGGER.error("No tests matched --tests option or positional args. Done.")
846             vsperf_finalize()
847             sys.exit(1)
848
849         suite = unittest.TestSuite()
850         settings_snapshot = copy.deepcopy(settings.__dict__)
851
852         for i, cfg in enumerate(selected_tests):
853             settings.setValue('_TEST_INDEX', i)
854             test_name = cfg.get('Name', '<Name not set>')
855             try:
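                # when '--test-params' was given as a list of per-test strings,
                # the i-th test uses the i-th parameter set; if there are more
                # tests than sets, the last set is reused for the remaining tests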
856                 test_params = settings.getValue('_PARAMS_LIST')
857                 if isinstance(test_params, list):
858                     list_index = i
859                     if i >= len(test_params):
860                         list_index = len(test_params) - 1
861                     test_params = test_params[list_index]
862                 if settings.getValue('CUMULATIVE_PARAMS'):
863                     test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
864                 settings.setValue('TEST_PARAMS', test_params)
865
866                 if args['integration']:
867                     test = IntegrationTestCase(cfg)
868                 else:
869                     test = PerformanceTestCase(cfg)
870
871                 test.run()
872                 suite.addTest(MockTestCase('', True, test.name))
873
874             # pylint: disable=broad-except
875             except (Exception) as ex:
876                 _LOGGER.exception("Failed to run test: %s", test_name)
877                 suite.addTest(MockTestCase(str(ex), False, test_name))
878                 _LOGGER.info("Continuing with next test...")
879             finally:
880                 if not settings.getValue('CUMULATIVE_PARAMS'):
881                     settings.restore_from_dict(settings_snapshot)
882
883         settings.restore_from_dict(settings_snapshot)
884
885
886         # Generate and print out the Performance Matrix
887         if args['matrix']:
888             generate_performance_matrix(selected_tests, results_path)
889
890         # generate final rst report with results of all executed TCs
891         generate_final_report()
892
893
894
895         if settings.getValue('XUNIT'):
896             xmlrunner.XMLTestRunner(
897                 output=settings.getValue('XUNIT_DIR'), outsuffix="",
898                 verbosity=0).run(suite)
899
900         if args['opnfvpod'] or settings.getValue('OPNFVPOD'):
901             pod_name = (args['opnfvpod'] if args['opnfvpod'] else
902                         settings.getValue('OPNFVPOD'))
903             installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
904             opnfv_url = settings.getValue('OPNFV_URL')
905             pkg_list = settings.getValue('PACKAGE_LIST')
906
907             int_data = {'pod': pod_name,
908                         'build_tag': get_build_tag(),
909                         'installer': installer_name,
910                         'pkg_list': pkg_list,
911                         'db_url': opnfv_url,
912                         # pass vswitch name from configuration to be used for failed
913                         # TCs; for successful TCs it is safer to use the vswitch
914                         # name from the CSV, as a TC can override the global configuration
915                         'vswitch': str(settings.getValue('VSWITCH')).lower()}
916             tc_names = [tc['Name'] for tc in selected_tests]
917             opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)
918
919     # cleanup before exit
920     vsperf_finalize()
921
922 if __name__ == "__main__":
923     main()