X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=testcases%2Ftestcase.py;h=6e215b469c9bbe4bbf626d3f8a1added369f566f;hb=c9cd093f2f441adc9dd33627255326008e021a67;hp=1437aeaef35fee81d02f8ad5ef823b9254770c90;hpb=6c15bb9203da9a21bbf9cfcbb829d5bd58c70223;p=vswitchperf.git diff --git a/testcases/testcase.py b/testcases/testcase.py index 1437aeae..6e215b46 100644 --- a/testcases/testcase.py +++ b/testcases/testcase.py @@ -14,14 +14,18 @@ """TestCase base class """ +from collections import OrderedDict +import copy import csv +import logging +import math import os +import re import time -import logging import subprocess -import copy -from collections import OrderedDict +from conf import settings as S +from conf import get_test_param import core.component_factory as component_factory from core.loader import Loader from core.results.results_constants import ResultsConstants @@ -29,8 +33,7 @@ from tools import tasks from tools import hugepages from tools import functions from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS -from conf import settings as S -from conf import get_test_param + class TestCase(object): """TestCase base class @@ -54,7 +57,6 @@ class TestCase(object): self._loadgen = None self._output_file = None self._tc_results = None - self.guest_loopback = [] self._settings_original = {} self._settings_paths_modified = False self._testcast_run_time = None @@ -67,7 +69,8 @@ class TestCase(object): # update global settings guest_loopback = get_test_param('guest_loopback', None) if guest_loopback: - self._update_settings('GUEST_LOOPBACK', [guest_loopback for dummy in S.getValue('GUEST_LOOPBACK')]) + # we can put just one item, it'll be expanded automatically for all VMs + self._update_settings('GUEST_LOOPBACK', [guest_loopback]) if 'VSWITCH' in self._settings_original or 'VNF' in self._settings_original: self._settings_original.update({ @@ -109,12 +112,6 @@ class TestCase(object): self._tunnel_type = get_test_param('tunnel_type', self._tunnel_type) - # identify guest loopback method, so it can be added into reports - if self.deployment == 'pvp': - self.guest_loopback.append(S.getValue('GUEST_LOOPBACK')[0]) - else: - self.guest_loopback = S.getValue('GUEST_LOOPBACK').copy() - # read configuration of streams; CLI parameter takes precedence to # testcase definition multistream = cfg.get('MultiStream', TRAFFIC_DEFAULTS['multistream']) @@ -150,13 +147,6 @@ class TestCase(object): # Packet Forwarding mode self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower() - # OVS Vanilla requires guest VM MAC address and IPs to work - if 'linux_bridge' in self.guest_loopback: - self._traffic['l2'].update({'srcmac': S.getValue('GUEST_NET2_MAC')[0], - 'dstmac': S.getValue('GUEST_NET1_MAC')[0]}) - self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'), - 'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')}) - # trafficgen configuration required for tests of tunneling protocols if self.deployment == "op2p": self._traffic['l2'].update({'srcmac': @@ -189,9 +179,6 @@ class TestCase(object): # mount hugepages if needed self._mount_hugepages() - # copy sources of l2 forwarding tools into VM shared dir if needed - self._copy_fwd_tools_for_all_guests() - self._logger.debug("Controllers:") loader = Loader() self._traffic_ctl = component_factory.create_traffic( @@ -202,6 +189,32 @@ class TestCase(object): self.deployment, loader.get_vnf_class()) + # verify enough hugepages are free to run the testcase + if not self._check_for_enough_hugepages(): + raise 
RuntimeError('Not enough hugepages free to run test.') + + # perform guest related handling + if self._vnf_ctl.get_vnfs_number(): + # copy sources of l2 forwarding tools into VM shared dir if needed + self._copy_fwd_tools_for_all_guests() + + # in case of multi VM in parallel, set the number of streams to the number of VMs + if self.deployment.startswith('pvpv'): + # for each VM NIC pair we need an unique stream + streams = 0 + for vm_nic in S.getValue('GUEST_NICS_NR')[:self._vnf_ctl.get_vnfs_number()]: + streams += int(vm_nic / 2) if vm_nic > 1 else 1 + self._logger.debug("VMs with parallel connection were detected. " + "Thus Number of streams was set to %s", streams) + self._traffic.update({'multistream': streams}) + + # OVS Vanilla requires guest VM MAC address and IPs to work + if 'linux_bridge' in S.getValue('GUEST_LOOPBACK'): + self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'), + 'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')}) + self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'), + 'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')}) + if self._vswitch_none: self._vswitch_ctl = component_factory.create_pktfwd( self.deployment, @@ -234,6 +247,26 @@ class TestCase(object): # restore original settings S.load_from_dict(self._settings_original) + # cleanup any namespaces created + if os.path.isdir('/tmp/namespaces'): + import tools.namespace + namespace_list = os.listdir('/tmp/namespaces') + if len(namespace_list): + self._logger.info('Cleaning up namespaces') + for name in namespace_list: + tools.namespace.delete_namespace(name) + os.rmdir('/tmp/namespaces') + # cleanup any veth ports created + if os.path.isdir('/tmp/veth'): + import tools.veth + veth_list = os.listdir('/tmp/veth') + if len(veth_list): + self._logger.info('Cleaning up veth ports') + for eth in veth_list: + port1, port2 = eth.split('-') + tools.veth.del_veth_port(port1, port2) + os.rmdir('/tmp/veth') + def run_report(self): """ Report test results """ @@ -245,7 +278,7 @@ class TestCase(object): self._traffic_ctl.print_results() self._tc_results = self._append_results(self._traffic_ctl.get_results()) - TestCase._write_result_to_file(self._tc_results, self._output_file) + TestCase.write_result_to_file(self._tc_results, self._output_file) def run(self): """Run the test @@ -323,8 +356,8 @@ class TestCase(object): item[ResultsConstants.SCAL_STREAM_COUNT] = self._traffic['multistream'] item[ResultsConstants.SCAL_STREAM_TYPE] = self._traffic['stream_type'] item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows'] - if self.deployment in ['pvp', 'pvvp'] and len(self.guest_loopback): - item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(self.guest_loopback) + if self._vnf_ctl.get_vnfs_number(): + item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(S.getValue('GUEST_LOOPBACK')) if self._tunnel_type: item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type return results @@ -332,19 +365,15 @@ class TestCase(object): def _copy_fwd_tools_for_all_guests(self): """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment. 
""" - # data are copied only for pvp and pvvp, so let's count number of 'v' - counter = 1 - while counter <= self.deployment.count('v'): - self._copy_fwd_tools_for_guest(counter) - counter += 1 + # consider only VNFs involved in the test + for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:self._vnf_ctl.get_vnfs_number()]): + self._copy_fwd_tools_for_guest(guest_dir) - def _copy_fwd_tools_for_guest(self, index): + def _copy_fwd_tools_for_guest(self, guest_dir): """Copy dpdk and l2fwd code to GUEST_SHARE_DIR of VM :param index: Index of VM starting from 1 (i.e. 1st VM has index 1) """ - guest_dir = S.getValue('GUEST_SHARE_DIR')[index-1] - # remove shared dir if it exists to avoid issues with file consistency if os.path.exists(guest_dir): tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger, @@ -354,10 +383,11 @@ class TestCase(object): os.makedirs(guest_dir) # copy sources into shared dir only if neccessary - if 'testpmd' in self.guest_loopback or 'l2fwd' in self.guest_loopback: + guest_loopback = set(S.getValue('GUEST_LOOPBACK')) + if 'testpmd' in guest_loopback or 'l2fwd' in guest_loopback: try: tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"', - os.path.join(S.getValue('RTE_SDK'), ''), + os.path.join(S.getValue('RTE_SDK_USER'), ''), os.path.join(guest_dir, 'DPDK')], self._logger, 'Copying DPDK to shared directory...', @@ -371,7 +401,6 @@ class TestCase(object): except subprocess.CalledProcessError: self._logger.error('Unable to copy DPDK and l2fwd to shared directory') - def _mount_hugepages(self): """Mount hugepages if usage of DPDK or Qemu is detected """ @@ -391,8 +420,82 @@ class TestCase(object): hugepages.umount_hugepages() self._hugepages_mounted = False + def _check_for_enough_hugepages(self): + """Check to make sure enough hugepages are free to satisfy the + test environment. 
+ """ + hugepages_needed = 0 + hugepage_size = hugepages.get_hugepage_size() + # get hugepage amounts per guest involved in the test + for guest in range(self._vnf_ctl.get_vnfs_number()): + hugepages_needed += math.ceil((int(S.getValue( + 'GUEST_MEMORY')[guest]) * 1000) / hugepage_size) + + # get hugepage amounts for each socket on dpdk + sock0_mem, sock1_mem = 0, 0 + if S.getValue('VSWITCH').lower().count('dpdk'): + # the import below needs to remain here and not put into the module + # imports because of an exception due to settings not yet loaded + from vswitches import ovs_dpdk_vhost + if ovs_dpdk_vhost.OvsDpdkVhost.old_dpdk_config(): + match = re.search( + r'-socket-mem\s+(\d+),(\d+)', + ''.join(S.getValue('VSWITCHD_DPDK_ARGS'))) + if match: + sock0_mem, sock1_mem = (int(match.group(1)) * 1024 / hugepage_size, + int(match.group(2)) * 1024 / hugepage_size) + else: + logging.info( + 'Could not parse socket memory config in dpdk params.') + else: + sock0_mem, sock1_mem = ( + S.getValue( + 'VSWITCHD_DPDK_CONFIG')['dpdk-socket-mem'].split(',')) + sock0_mem, sock1_mem = (int(sock0_mem) * 1024 / hugepage_size, + int(sock1_mem) * 1024 / hugepage_size) + + # If hugepages needed, verify the amounts are free + if any([hugepages_needed, sock0_mem, sock1_mem]): + free_hugepages = hugepages.get_free_hugepages() + if hugepages_needed: + logging.info('Need %s hugepages free for guests', + hugepages_needed) + result1 = free_hugepages >= hugepages_needed + free_hugepages -= hugepages_needed + else: + result1 = True + + if sock0_mem: + logging.info('Need %s hugepages free for dpdk socket 0', + sock0_mem) + result2 = hugepages.get_free_hugepages('0') >= sock0_mem + free_hugepages -= sock0_mem + else: + result2 = True + + if sock1_mem: + logging.info('Need %s hugepages free for dpdk socket 1', + sock1_mem) + result3 = hugepages.get_free_hugepages('1') >= sock1_mem + free_hugepages -= sock1_mem + else: + result3 = True + + logging.info('Need a total of {} total hugepages'.format( + hugepages_needed + sock1_mem + sock0_mem)) + + # The only drawback here is sometimes dpdk doesn't release + # its hugepages on a test failure. This could cause a test + # to fail when dpdk would be OK to start because it will just + # use the previously allocated hugepages. + result4 = True if free_hugepages >= 0 else False + + return all([result1, result2, result3, result4]) + else: + return True + @staticmethod - def _write_result_to_file(results, output): + def write_result_to_file(results, output): """Write list of dictionaries to a CSV file. Each element on list will create separate row in output file.