bugfix: Parameter duration does not work
[vswitchperf.git] / vsperf
diff --git a/vsperf b/vsperf
index 50f0996..53f5507 100755 (executable)
--- a/vsperf
+++ b/vsperf
@@ -27,6 +27,9 @@ import shutil
 import unittest
 import xmlrunner
 import locale
+import copy
+import glob
+import subprocess
 
 sys.dont_write_bytecode = True
 
@@ -37,6 +40,8 @@ from testcases import TestCase
 from tools import tasks
 from tools.pkt_gen import trafficgen
 from tools.opnfvdashboard import opnfvdashboard
+from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
+import core.component_factory as component_factory
 
 VERBOSITY_LEVELS = {
     'debug': logging.DEBUG,
@@ -46,6 +51,11 @@ VERBOSITY_LEVELS = {
     'critical': logging.CRITICAL
 }
 
+_TEMPLATE_RST = {'head'  : 'tools/report/report_head.rst',
+                 'foot'  : 'tools/report/report_foot.rst',
+                 'final' : 'test_report.rst',
+                 'tmp'   : 'tools/report/report_tmp_caption.rst'
+                }
 
 def parse_arguments():
     """
@@ -133,6 +143,12 @@ def parse_arguments():
             To run all tests omit both positional args and --tests arg.')
 
     group = parser.add_argument_group('test selection options')
+    group.add_argument('-m', '--mode', help='vsperf mode of operation;\
+            Values: "normal" - execute vSwitch, VNF and traffic generator;\
+            "trafficgen" - execute only traffic generator; "trafficgen-off" \
+            - execute vSwitch and VNF; "trafficgen-pause" - execute vSwitch \
+            and VNF but pause before traffic transmission', default='normal')
+
     group.add_argument('-f', '--test-spec', help='test specification file')
     group.add_argument('-d', '--test-dir', help='directory containing tests')
     group.add_argument('-t', '--tests', help='Comma-separated list of terms \
@@ -140,11 +156,11 @@ def parse_arguments():
             name contains RFC2544 less those containing "p2p"')
     group.add_argument('--verbosity', choices=list_logging_levels(),
                        help='debug level')
+    group.add_argument('--run-integration', action='store_true',
+                       help='run integration tests')
     group.add_argument('--trafficgen', help='traffic generator to use')
     group.add_argument('--vswitch', help='vswitch implementation to use')
     group.add_argument('--fwdapp', help='packet forwarding application to use')
     group.add_argument('--vnf', help='vnf to use')
-    group.add_argument('--duration', help='traffic transmit duration')
     group.add_argument('--sysmetrics', help='system metrics logger to use')
     group = parser.add_argument_group('test behavior options')
     group.add_argument('--xunit', action='store_true',
@@ -253,6 +269,43 @@ def check_and_set_locale():
         logging.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                         system_locale, locale.getdefaultlocale())
 
+
+def generate_final_report(path):
+    """ Function will check if partial test results are available
+    and generates final report in rst format.
+    """
+
+    # check if there are any results in rst format
+    rst_results = glob.glob(os.path.join(path, 'result*rst'))
+    if rst_results:
+        try:
+            test_report = os.path.join(path, '{}_{}'.format(settings.getValue('VSWITCH'), _TEMPLATE_RST['final']))
+            # create report caption directly - it is not worth executing the jinja machinery for it
+            report_caption = '{}\n{} {}\n{}\n\n'.format(
+                '============================================================',
+                'Performance report for',
+                Loader().get_vswitches()[settings.getValue('VSWITCH')].__doc__.strip().split('\n')[0],
+                '============================================================')
+
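+            # write the caption into a temporary rst file, which is prepended to the report below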
+            with open(_TEMPLATE_RST['tmp'], 'w') as file_:
+                file_.write(report_caption)
+
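+            # concatenate caption, report header, all partial results and footer into the final report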
+            retval = subprocess.call('cat {} {} {} {} > {}'.format(_TEMPLATE_RST['tmp'], _TEMPLATE_RST['head'],
+                                                                   ' '.join(rst_results), _TEMPLATE_RST['foot'],
+                                                                   test_report), shell=True)
+            if retval == 0 and os.path.isfile(test_report):
+                logging.info('Overall test report written to "%s"', test_report)
+            else:
+                logging.error('Generation of overall test report has failed.')
+
+            # remove temporary file
+            os.remove(_TEMPLATE_RST['tmp'])
+
+        except subprocess.CalledProcessError:
+            logging.error('Generation of overall test report has failed.')
+
+
 class MockTestCase(unittest.TestCase):
     """Allow use of xmlrunner to generate Jenkins compatible output without
     using xmlrunner to actually run tests.
@@ -290,6 +343,13 @@ def main():
 
     settings.load_from_dir('conf')
 
+    performance_test = True
+
+    # Load non performance/integration tests
+    if args['run_integration']:
+        performance_test = False
+        settings.load_from_dir('conf/integration')
+
     # load command line parameters first in case there are settings files
     # to be used
     settings.load_from_dict(args)
@@ -304,7 +364,7 @@ def main():
     # than both a settings file and environment variables
     settings.load_from_dict(args)
 
-    vswitch_none =  False
+    vswitch_none = False
     # set dpdk and ovs paths accorfing to VNF and VSWITCH
     if settings.getValue('VSWITCH').endswith('Vanilla'):
         # settings paths for Vanilla
@@ -332,7 +392,6 @@ def main():
     check_and_set_locale()
 
     # configure trafficgens
-
     if args['trafficgen']:
         trafficgens = Loader().get_trafficgens()
         if args['trafficgen'] not in trafficgens:
@@ -343,7 +402,7 @@ def main():
 
     # configure vswitch
     if args['vswitch']:
-        vswitch_none =  'none' == args['vswitch'].strip().lower()
+        vswitch_none = 'none' == args['vswitch'].strip().lower()
         if vswitch_none:
             settings.setValue('VSWITCH', 'none')
         else:
@@ -372,13 +431,6 @@ def main():
                           settings.getValue('vnf_dir'))
             sys.exit(1)
 
-    if args['duration']:
-        if args['duration'].isdigit() and int(args['duration']) > 0:
-            settings.setValue('duration', args['duration'])
-        else:
-            logging.error('The selected Duration is not a number')
-            sys.exit(1)
-
     # update global settings
     guest_loopback = get_test_param('guest_loopback', None)
     if guest_loopback:
@@ -387,119 +439,138 @@ def main():
             tmp_gl.append(guest_loopback)
         settings.setValue('GUEST_LOOPBACK', tmp_gl)
 
+    settings.setValue('mode', args['mode'])
+
     # generate results directory name
     date = datetime.datetime.fromtimestamp(time.time())
     results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
     results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
 
-    # configure tests
-    testcases = settings.getValue('PERFORMANCE_TESTS')
-    all_tests = []
-    for cfg in testcases:
-        try:
-            all_tests.append(TestCase(cfg, results_path))
-        except (Exception) as _:
-            logger.exception("Failed to create test: %s",
-                             cfg.get('Name', '<Name not set>'))
-            raise
-
-    # if required, handle list-* operations
-
-    if args['list']:
-        print("Available Tests:")
-        print("======")
-        for test in all_tests:
-            print('* %-18s%s' % ('%s:' % test.name, test.desc))
-        exit()
-
-    if args['list_trafficgens']:
-        print(Loader().get_trafficgens_printable())
-        exit()
-
-    if args['list_collectors']:
-        print(Loader().get_collectors_printable())
-        exit()
-
-    if args['list_vswitches']:
-        print(Loader().get_vswitches_printable())
-        exit()
-
-    if args['list_fwdapps']:
-        print(Loader().get_pktfwds_printable())
-        exit()
-
-    if args['list_vnfs']:
-        print(Loader().get_vnfs_printable())
-        exit()
-
-    if args['list_settings']:
-        print(str(settings))
-        exit()
-
-    # select requested tests
-    if args['exact_test_name'] and args['tests']:
-        logger.error("Cannot specify tests with both positional args and --test.")
-        sys.exit(1)
-
-    if args['exact_test_name']:
-        exact_names = args['exact_test_name']
-        # positional args => exact matches only
-        selected_tests = [test for test in all_tests if test.name in exact_names]
-    elif args['tests']:
-        # --tests => apply filter to select requested tests
-        selected_tests = apply_filter(all_tests, args['tests'])
-    else:
-        # Default - run all tests
-        selected_tests = all_tests
-
-    if not selected_tests:
-        logger.error("No tests matched --test option or positional args. Done.")
-        sys.exit(1)
-
     # create results directory
     if not os.path.exists(results_path):
         logger.info("Creating result directory: "  + results_path)
         os.makedirs(results_path)
 
-    # run tests
-    suite = unittest.TestSuite()
-    for test in selected_tests:
-        try:
-            if vswitch_none:
-                if test.deployment.lower() != 'p2p':
-                    logging.error('\'none\' vswitch option supported only'
-                                  ' for p2p deployment.')
-                    sys.exit(1)
-            test.run()
-            suite.addTest(MockTestCase('', True, test.name))
-        #pylint: disable=broad-except
-        except (Exception) as ex:
-            logger.exception("Failed to run test: %s", test.name)
-            suite.addTest(MockTestCase(str(ex), False, test.name))
-            logger.info("Continuing with next test...")
-
-    if settings.getValue('XUNIT'):
-        xmlrunner.XMLTestRunner(
-            output=settings.getValue('XUNIT_DIR'), outsuffix="",
-            verbosity=0).run(suite)
-
-    if args['opnfvpod']:
-        pod_name = args['opnfvpod']
-        installer_name = settings.getValue('OPNFV_INSTALLER')
-        opnfv_url = settings.getValue('OPNFV_URL')
-        pkg_list = settings.getValue('PACKAGE_LIST')
-
-        int_data = {'cuse': False,
-                    'vanilla': False,
-                    'pod': pod_name,
-                    'installer': installer_name,
-                    'pkg_list': pkg_list,
-                    'db_url': opnfv_url}
-        if settings.getValue('VSWITCH').endswith('Vanilla'):
-            int_data['vanilla'] = True
-        if settings.getValue('VNF').endswith('Cuse'):
-            int_data['cuse'] = True
-        opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
+    if settings.getValue('mode') == 'trafficgen':
+        # execute only traffic generator
+        logging.debug("Executing traffic generator:")
+        loader = Loader()
+        # set traffic details, so they can be passed to traffic ctl
+        traffic = copy.deepcopy(TRAFFIC_DEFAULTS)
+        traffic.update({'traffic_type': get_test_param('traffic_type', 'rfc2544'),
+                        'bidir': get_test_param('bidirectional', False),
+                        'multistream': int(get_test_param('multistream', 0)),
+                        'stream_type': get_test_param('stream_type', 'L4'),
+                        'frame_rate': int(get_test_param('iload', 100))})
+
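+        # create a traffic controller for the configured traffic generator and send the traffic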
+        traffic_ctl = component_factory.create_traffic(
+            traffic['traffic_type'],
+            loader.get_trafficgen_class())
+        with traffic_ctl:
+            traffic_ctl.send_traffic(traffic)
+        logging.debug("Traffic Results:")
+        traffic_ctl.print_results()
+    else:
+        # configure tests
+        testcases = settings.getValue('PERFORMANCE_TESTS')
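+        # integration tests replace the default performance test suite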
+        if args['run_integration']:
+            testcases = settings.getValue('INTEGRATION_TESTS')
+
+        all_tests = []
+        for cfg in testcases:
+            try:
+                all_tests.append(TestCase(cfg, results_path, performance_test))
+            except (Exception) as _:
+                logger.exception("Failed to create test: %s",
+                                 cfg.get('Name', '<Name not set>'))
+                raise
+
+        # if required, handle list-* operations
+
+        if args['list']:
+            print("Available Tests:")
+            print("======")
+            for test in all_tests:
+                print('* %-18s%s' % ('%s:' % test.name, test.desc))
+            exit()
+
+        if args['list_trafficgens']:
+            print(Loader().get_trafficgens_printable())
+            exit()
+
+        if args['list_collectors']:
+            print(Loader().get_collectors_printable())
+            exit()
+
+        if args['list_vswitches']:
+            print(Loader().get_vswitches_printable())
+            exit()
+
+        if args['list_vnfs']:
+            print(Loader().get_vnfs_printable())
+            exit()
+
+        if args['list_settings']:
+            print(str(settings))
+            exit()
+
+        # select requested tests
+        if args['exact_test_name'] and args['tests']:
+            logger.error("Cannot specify tests with both positional args and --test.")
+            sys.exit(1)
+
+        if args['exact_test_name']:
+            exact_names = args['exact_test_name']
+            # positional args => exact matches only
+            selected_tests = [test for test in all_tests if test.name in exact_names]
+        elif args['tests']:
+            # --tests => apply filter to select requested tests
+            selected_tests = apply_filter(all_tests, args['tests'])
+        else:
+            # Default - run all tests
+            selected_tests = all_tests
+
+        if not selected_tests:
+            logger.error("No tests matched --test option or positional args. Done.")
+            sys.exit(1)
+
+        # run tests
+        suite = unittest.TestSuite()
+        for test in selected_tests:
+            try:
+                test.run()
+                suite.addTest(MockTestCase('', True, test.name))
+            #pylint: disable=broad-except
+            except (Exception) as ex:
+                logger.exception("Failed to run test: %s", test.name)
+                suite.addTest(MockTestCase(str(ex), False, test.name))
+                logger.info("Continuing with next test...")
+
+        # generate final rst report with results of all executed TCs
+        generate_final_report(results_path)
+
+        if settings.getValue('XUNIT'):
+            xmlrunner.XMLTestRunner(
+                output=settings.getValue('XUNIT_DIR'), outsuffix="",
+                verbosity=0).run(suite)
+
+        if args['opnfvpod']:
+            pod_name = args['opnfvpod']
+            installer_name = settings.getValue('OPNFV_INSTALLER')
+            opnfv_url = settings.getValue('OPNFV_URL')
+            pkg_list = settings.getValue('PACKAGE_LIST')
+
+            int_data = {'cuse': False,
+                        'vanilla': False,
+                        'pod': pod_name,
+                        'installer': installer_name,
+                        'pkg_list': pkg_list,
+                        'db_url': opnfv_url}
+            if settings.getValue('VSWITCH').endswith('Vanilla'):
+                int_data['vanilla'] = True
+            if settings.getValue('VNF').endswith('Cuse'):
+                int_data['cuse'] = True
+            opnfvdashboard.results2opnfv_dashboard(results_path, int_data)
 
     #remove directory if no result files were created.
     if os.path.exists(results_path):