[vswitchperf.git] / testcases / testcase.py
index ff1247f..7f22c18 100644
 """TestCase base class
 """
 
+from collections import OrderedDict
+import copy
 import csv
+import logging
+import math
 import os
+import re
 import time
-import logging
 import subprocess
-import copy
-from collections import OrderedDict
 
+from conf import settings as S
+from conf import get_test_param
 import core.component_factory as component_factory
 from core.loader import Loader
 from core.results.results_constants import ResultsConstants
 from tools import tasks
 from tools import hugepages
+from tools import functions
 from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
-from conf import settings as S
-from conf import get_test_param
+
 
 class TestCase(object):
     """TestCase base class
@@ -44,6 +48,7 @@ class TestCase(object):
             values.
         :param results_dir: Where the csv formatted results are written.
         """
+        self._testcase_start_time = time.time()
         self._hugepages_mounted = False
         self._traffic_ctl = None
         self._vnf_ctl = None
@@ -52,6 +57,26 @@ class TestCase(object):
         self._loadgen = None
         self._output_file = None
         self._tc_results = None
+        self._settings_original = {}
+        self._settings_paths_modified = False
+        self._testcase_run_time = None
+
+        # store all GUEST_ specific settings to preserve their original values before expansion
+        for key in S.__dict__:
+            if key.startswith('GUEST_'):
+                self._settings_original[key] = S.getValue(key)
+
+        self._update_settings('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH')))
+        self._update_settings('VNF', cfg.get('VNF', S.getValue('VNF')))
+        self._update_settings('TRAFFICGEN', cfg.get('Trafficgen', S.getValue('TRAFFICGEN')))
+        self._update_settings('TEST_PARAMS', cfg.get('Parameters', S.getValue('TEST_PARAMS')))
+
+        # update global settings
+        functions.settings_update_paths()
+        guest_loopback = get_test_param('guest_loopback', None)
+        if guest_loopback:
+            # a single item is enough; it will be expanded automatically for all VMs
+            self._update_settings('GUEST_LOOPBACK', [guest_loopback])
 
         # set test parameters; CLI options take precedence to testcase settings
         self._logger = logging.getLogger(__name__)
@@ -61,6 +86,10 @@ class TestCase(object):
 
         bidirectional = cfg.get('biDirectional', TRAFFIC_DEFAULTS['bidir'])
         bidirectional = get_test_param('bidirectional', bidirectional)
+        if not isinstance(bidirectional, str):
+            raise TypeError(
+                'Bi-dir value must be of type string in testcase configuration')
+        bidirectional = bidirectional.title()  # Keep things consistent
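+        # e.g. 'true' becomes 'True', so later string comparisons stay predictable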
 
         traffic_type = cfg.get('Traffic Type', TRAFFIC_DEFAULTS['traffic_type'])
         traffic_type = get_test_param('traffic_type', traffic_type)
@@ -82,19 +111,6 @@ class TestCase(object):
                 self._tunnel_type = get_test_param('tunnel_type',
                                                    self._tunnel_type)
 
-
-        # identify guest loopback method, so it can be added into reports
-        self.guest_loopback = []
-        if self.deployment in ['pvp', 'pvvp']:
-            guest_loopback = get_test_param('guest_loopback', None)
-            if guest_loopback:
-                self.guest_loopback.append(guest_loopback)
-            else:
-                if self.deployment == 'pvp':
-                    self.guest_loopback.append(S.getValue('GUEST_LOOPBACK')[0])
-                else:
-                    self.guest_loopback = S.getValue('GUEST_LOOPBACK').copy()
-
         # read configuration of streams; CLI parameter takes precedence to
         # testcase definition
         multistream = cfg.get('MultiStream', TRAFFIC_DEFAULTS['multistream'])
@@ -127,27 +143,10 @@ class TestCase(object):
                               'pre_installed_flows' : pre_installed_flows,
                               'frame_rate': int(framerate)})
 
-        # OVS Vanilla requires guest VM MAC address and IPs to work
-        if 'linux_bridge' in self.guest_loopback:
-            self._traffic['l2'].update({'srcmac': S.getValue('GUEST_NET2_MAC')[0],
-                                        'dstmac': S.getValue('GUEST_NET1_MAC')[0]})
-            self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'),
-                                        'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')})
-
         # Packet Forwarding mode
         self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower()
 
-    def run_initialize(self):
-        """ Prepare test execution environment
-        """
-        self._logger.debug(self.name)
-
-        # mount hugepages if needed
-        self._mount_hugepages()
-
-        # copy sources of l2 forwarding tools into VM shared dir if needed
-        self._copy_fwd_tools_for_guest()
-
+        # trafficgen configuration required for tests of tunneling protocols
         if self.deployment == "op2p":
             self._traffic['l2'].update({'srcmac':
                                         S.getValue('TRAFFICGEN_PORT1_MAC'),
@@ -171,7 +170,13 @@ class TestCase(object):
             else:
                 self._logger.debug("MAC addresses can not be read")
 
+    def run_initialize(self):
+        """ Prepare test execution environment
+        """
+        self._logger.debug(self.name)
 
+        # mount hugepages if needed
+        self._mount_hugepages()
 
         self._logger.debug("Controllers:")
         loader = Loader()
@@ -183,6 +188,32 @@ class TestCase(object):
             self.deployment,
             loader.get_vnf_class())
 
+        # verify enough hugepages are free to run the testcase
+        if not self._check_for_enough_hugepages():
+            raise RuntimeError('Not enough hugepages free to run test.')
+
+        # perform guest related handling
+        if self._vnf_ctl.get_vnfs_number():
+            # copy sources of l2 forwarding tools into VM shared dir if needed
+            self._copy_fwd_tools_for_all_guests(self._vnf_ctl.get_vnfs_number())
+
+            # in case of multiple VMs running in parallel, set the number of streams based on the VM NIC pairs
+            if self.deployment.startswith('pvpv'):
+                # for each VM NIC pair we need a unique stream
+                streams = 0
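+                # e.g. a guest with 4 NICs forms 2 NIC pairs and thus needs 2 streams;
+                # a single NIC guest still needs 1 stream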
+                for vm_nic in S.getValue('GUEST_NICS_NR')[:self._vnf_ctl.get_vnfs_number()]:
+                    streams += int(vm_nic / 2) if vm_nic > 1 else 1
+                self._logger.debug("VMs with parallel connection were detected. "
+                                   "Thus Number of streams was set to %s", streams)
+                self._traffic.update({'multistream': streams})
+
+            # OVS Vanilla requires guest VM MAC address and IPs to work
+            if 'linux_bridge' in S.getValue('GUEST_LOOPBACK'):
+                self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'),
+                                            'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')})
+                self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'),
+                                            'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')})
+
         if self._vswitch_none:
             self._vswitch_ctl = component_factory.create_pktfwd(
                 self.deployment,
@@ -212,6 +243,29 @@ class TestCase(object):
         # umount hugepages if mounted
         self._umount_hugepages()
 
+        # restore original settings
+        S.load_from_dict(self._settings_original)
+
+        # cleanup any namespaces created
+        if os.path.isdir('/tmp/namespaces'):
+            import tools.namespace
+            namespace_list = os.listdir('/tmp/namespaces')
+            if len(namespace_list):
+                self._logger.info('Cleaning up namespaces')
+            for name in namespace_list:
+                tools.namespace.delete_namespace(name)
+            os.rmdir('/tmp/namespaces')
+        # cleanup any veth ports created
+        if os.path.isdir('/tmp/veth'):
+            import tools.veth
+            veth_list = os.listdir('/tmp/veth')
+            if len(veth_list):
+                self._logger.info('Cleaning up veth ports')
+            for eth in veth_list:
+                port1, port2 = eth.split('-')
+                tools.veth.del_veth_port(port1, port2)
+            os.rmdir('/tmp/veth')
+
     def run_report(self):
         """ Report test results
         """
@@ -223,7 +277,7 @@ class TestCase(object):
             self._traffic_ctl.print_results()
 
             self._tc_results = self._append_results(self._traffic_ctl.get_results())
-            TestCase._write_result_to_file(self._tc_results, self._output_file)
+            TestCase.write_result_to_file(self._tc_results, self._output_file)
 
     def run(self):
         """Run the test
@@ -264,9 +318,25 @@ class TestCase(object):
         # tear down test execution environment and log results
         self.run_finalize()
 
+        self._testcase_run_time = time.strftime("%H:%M:%S",
+                                  time.gmtime(time.time() - self._testcase_start_time))
+        logging.info("Testcase execution time: " + self._testcase_run_time)
         # report test results
         self.run_report()
 
+    def _update_settings(self, param, value):
+        """ Check value of given configuration parameter
+        In case that new value is different, then testcase
+        specific settings is updated and original value stored
+
+        :param param: Name of parameter inside settings
+        :param value: Disired parameter value
+        """
+        orig_value = S.getValue(param)
+        if orig_value != value:
+            self._settings_original[param] = orig_value
+            S.setValue(param, value)
+
     def _append_results(self, results):
         """
         Method appends mandatory Test Case results to list of dictionaries.
@@ -280,51 +350,73 @@ class TestCase(object):
             item[ResultsConstants.ID] = self.name
             item[ResultsConstants.DEPLOYMENT] = self.deployment
             item[ResultsConstants.TRAFFIC_TYPE] = self._traffic['l3']['proto']
+            item[ResultsConstants.TEST_RUN_TIME] = self._testcase_run_time
             if self._traffic['multistream']:
                 item[ResultsConstants.SCAL_STREAM_COUNT] = self._traffic['multistream']
                 item[ResultsConstants.SCAL_STREAM_TYPE] = self._traffic['stream_type']
                 item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows']
-            if len(self.guest_loopback):
-                item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(self.guest_loopback)
+            if self._vnf_ctl.get_vnfs_number():
+                item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(S.getValue('GUEST_LOOPBACK'))
             if self._tunnel_type:
                 item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type
         return results
 
-    def _copy_fwd_tools_for_guest(self):
-        """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] for use by guests.
+    def _copy_fwd_tools_for_all_guests(self, vm_count):
+        """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment.
+        """
+        # consider only VNFs involved in the test
+        for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:vm_count]):
+            self._copy_fwd_tools_for_guest(guest_dir)
+
+    def _copy_fwd_tools_for_guest(self, guest_dir):
+        """Copy dpdk and l2fwd code to GUEST_SHARE_DIR of VM
+
+        :param index: Index of VM starting from 1 (i.e. 1st VM has index 1)
         """
-        counter = 0
-        # method is executed only for pvp and pvvp, so let's count number of 'v'
-        while counter < self.deployment.count('v'):
-            guest_dir = S.getValue('GUEST_SHARE_DIR')[counter]
-
-            # remove shared dir if it exists to avoid issues with file consistency
-            if os.path.exists(guest_dir):
-                tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger,
-                               'Removing content of shared directory...', True)
-
-            # directory to share files between host and guest
-            os.makedirs(guest_dir)
-
-            # copy sources into shared dir only if neccessary
-            if 'testpmd' in self.guest_loopback or 'l2fwd' in self.guest_loopback:
-                try:
-                    tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"',
-                                    os.path.join(S.getValue('RTE_SDK'), ''),
-                                    os.path.join(guest_dir, 'DPDK')],
-                                   self._logger,
-                                   'Copying DPDK to shared directory...',
-                                   True)
-                    tasks.run_task(['rsync', '-a', '-r', '-l',
-                                    os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'),
-                                    os.path.join(guest_dir, 'l2fwd')],
-                                   self._logger,
-                                   'Copying l2fwd to shared directory...',
-                                   True)
-                except subprocess.CalledProcessError:
-                    self._logger.error('Unable to copy DPDK and l2fwd to shared directory')
-
-            counter += 1
+        # remove shared dir if it exists to avoid issues with file consistency
+        if os.path.exists(guest_dir):
+            tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger,
+                           'Removing content of shared directory...', True)
+
+        # directory to share files between host and guest
+        os.makedirs(guest_dir)
+
+        # copy sources into shared dir only if necessary
+        guest_loopback = set(S.getValue('GUEST_LOOPBACK'))
+        if 'testpmd' in guest_loopback:
+            try:
+                # Exclude the whole .git/ subdirectory and all object files;
+                # it is assumed that the same RTE_TARGET is used on both the host
+                # and the VMs. This simplification significantly speeds up the
+                # testpmd build. If a different RTE_TARGET is ever needed inside
+                # the VM, the whole DPDK has to be built from scratch in the VM.
+                # In that case only the DPDK sources can be copied (e.g. by
+                # excluding all items reported by git status -unormal --porcelain).
+                # NOTE: Excluding the RTE_TARGET directory won't help on systems
+                # where DPDK is built for multiple targets (e.g. for gcc & icc).
+                exclude = []
+                exclude.append(r'--exclude=.git/')
+                exclude.append(r'--exclude=*.o')
+                tasks.run_task(['rsync', '-a', '-r', '-l'] + exclude +
+                               [os.path.join(S.getValue('TOOLS')['dpdk_src'], ''),
+                                os.path.join(guest_dir, 'DPDK')],
+                               self._logger,
+                               'Copying DPDK to shared directory...',
+                               True)
+            except subprocess.CalledProcessError:
+                self._logger.error('Unable to copy DPDK to shared directory')
+                raise
+        if 'l2fwd' in guest_loopback:
+            try:
+                tasks.run_task(['rsync', '-a', '-r', '-l',
+                                os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'),
+                                os.path.join(guest_dir, 'l2fwd')],
+                               self._logger,
+                               'Copying l2fwd to shared directory...',
+                               True)
+            except subprocess.CalledProcessError:
+                self._logger.error('Unable to copy l2fwd to shared directory')
+                raise
 
     def _mount_hugepages(self):
         """Mount hugepages if usage of DPDK or Qemu is detected
@@ -333,7 +425,8 @@ class TestCase(object):
         if not self._hugepages_mounted and \
             (self.deployment.count('v') or \
              S.getValue('VSWITCH').lower().count('dpdk') or \
-             self._vswitch_none):
+             self._vswitch_none or \
+             self.test and 'vnf' in [step[0][0:3] for step in self.test]):
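+            # e.g. a step such as ['vnf1', 'start'] reduces to 'vnf' via step[0][0:3]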
             hugepages.mount_hugepages()
             self._hugepages_mounted = True
 
@@ -344,8 +437,82 @@ class TestCase(object):
             hugepages.umount_hugepages()
             self._hugepages_mounted = False
 
+    def _check_for_enough_hugepages(self):
+        """Check to make sure enough hugepages are free to satisfy the
+        test environment.
+        """
+        hugepages_needed = 0
+        hugepage_size = hugepages.get_hugepage_size()
+        # get hugepage amounts per guest involved in the test
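+        # (assuming GUEST_MEMORY is in MB and hugepage_size in kB, e.g. a 4096 MB
+        # guest with 2048 kB hugepages needs 2000 pages)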
+        for guest in range(self._vnf_ctl.get_vnfs_number()):
+            hugepages_needed += math.ceil((int(S.getValue(
+                'GUEST_MEMORY')[guest]) * 1000) / hugepage_size)
+
+        # get hugepage amounts for each socket on dpdk
+        sock0_mem, sock1_mem = 0, 0
+        if S.getValue('VSWITCH').lower().count('dpdk'):
+            # the import below must stay here instead of in the module level
+            # imports, because importing it before settings are loaded raises
+            # an exception
+            from vswitches import ovs_dpdk_vhost
+            if ovs_dpdk_vhost.OvsDpdkVhost.old_dpdk_config():
+                match = re.search(
+                    r'-socket-mem\s+(\d+),(\d+)',
+                    ''.join(S.getValue('VSWITCHD_DPDK_ARGS')))
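+                # e.g. '--socket-mem 1024,1024' requests 1024 MB of hugepages per NUMA socket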
+                if match:
+                    sock0_mem, sock1_mem = (int(match.group(1)) * 1024 / hugepage_size,
+                                            int(match.group(2)) * 1024 / hugepage_size)
+                else:
+                    logging.info(
+                        'Could not parse socket memory config in dpdk params.')
+            else:
+                sock0_mem, sock1_mem = (
+                    S.getValue(
+                        'VSWITCHD_DPDK_CONFIG')['dpdk-socket-mem'].split(','))
+                sock0_mem, sock1_mem = (int(sock0_mem) * 1024 / hugepage_size,
+                                        int(sock1_mem) * 1024 / hugepage_size)
+
+        # If hugepages are needed, verify that the required amounts are free
+        if any([hugepages_needed, sock0_mem, sock1_mem]):
+            free_hugepages = hugepages.get_free_hugepages()
+            if hugepages_needed:
+                logging.info('Need %s hugepages free for guests',
+                             hugepages_needed)
+                result1 = free_hugepages >= hugepages_needed
+                free_hugepages -= hugepages_needed
+            else:
+                result1 = True
+
+            if sock0_mem:
+                logging.info('Need %s hugepages free for dpdk socket 0',
+                             sock0_mem)
+                result2 = hugepages.get_free_hugepages('0') >= sock0_mem
+                free_hugepages -= sock0_mem
+            else:
+                result2 = True
+
+            if sock1_mem:
+                logging.info('Need %s hugepages free for dpdk socket 1',
+                             sock1_mem)
+                result3 = hugepages.get_free_hugepages('1') >= sock1_mem
+                free_hugepages -= sock1_mem
+            else:
+                result3 = True
+
+            logging.info('Need a total of %s hugepages',
+                         hugepages_needed + sock0_mem + sock1_mem)
+
+            # The only drawback here is that dpdk sometimes doesn't release
+            # its hugepages after a test failure. This could make a test fail
+            # even though dpdk would be OK to start, because it would simply
+            # reuse the previously allocated hugepages.
+            result4 = free_hugepages >= 0
+
+            return all([result1, result2, result3, result4])
+        else:
+            return True
+
     @staticmethod
-    def _write_result_to_file(results, output):
+    def write_result_to_file(results, output):
         """Write list of dictionaries to a CSV file.
 
         Each element on list will create separate row in output file.