multi VM: Multi VMs in serial or parallel 07/19507/6
authorMartin Klozik <martinx.klozik@intel.com>
Tue, 16 Aug 2016 13:59:05 +0000 (14:59 +0100)
committerMartin Klozik <martinx.klozik@intel.com>
Thu, 1 Sep 2016 13:52:49 +0000 (14:52 +0100)
Support for deployment scenarios with any number of VMs
in both serial and parallel configurations. Detailed
content of the patch:

* VswitchControllerPXP class for multi VM support
* pvvpxx and pvpvxx deployments for xx VMs in
  serial or parallel configuration respectively
* expansion of special GUEST_ options to the
  requested number of VMs
* support of GUEST_ options specific macros
  #VMINDEX, #MAC(), #IP() and #EVAL()
* all GUEST specific options are turned into lists
  so they can be VM specific
* support for VM with 1 NIC
* support for VM with multiple NIC pairs; traffic
  is routed in serial or parallel between NIC pairs
  based on deployment scenario
* support for PVVP and PVPV scenarios using VMs
  with different numbers of NICs
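
For illustration, the new "pvpv_cont" testcase defined below exercises two
VMs in parallel and can be selected like any other testcase (invocation is
a sketch; exact CLI options may differ):

    ./vsperf --conf-file user_settings.py pvpv_cont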

JIRA: VSPERF-361

Change-Id: I05bedbdfa9a81ea0166d9b03d83ae49d6cb8b19b
Signed-off-by: Martin Klozik <martinx.klozik@intel.com>
Reviewed-by: Maryam Tahhan <maryam.tahhan@intel.com>
Reviewed-by: Al Morton <acmorton@att.com>
Reviewed-by: Christian Trautman <ctrautma@redhat.com>
Reviewed-by: Bill Michalowski <bmichalo@redhat.com>
Reviewed-by: Antonio Fischetti <antonio.fischetti@intel.com>
20 files changed:
conf/01_testcases.conf
conf/02_vswitch.conf
conf/04_vnf.conf
conf/__init__.py
core/component_factory.py
core/vnf_controller.py
core/vswitch_controller_pvp.py [deleted file]
core/vswitch_controller_pvvp.py [deleted file]
core/vswitch_controller_pxp.py [new file with mode: 0644]
docs/configguide/installation.rst
docs/design/vswitchperf_design.rst
docs/msc/vsperf.msc
docs/userguide/testusage.rst
src/dpdk/dpdk.py
testcases/testcase.py
vnfs/qemu/qemu.py
vnfs/qemu/qemu_dpdk_vhost_user.py
vnfs/qemu/qemu_virtio_net.py
vnfs/vnf/vnf.py
vswitches/ovs_vanilla.py

index 23a3ae5..b9c59a1 100755 (executable)
@@ -170,6 +170,14 @@ PERFORMANCE_TESTS = [
         "biDirectional": "True",
         "iLoad": "100",
     },
+    {
+        "Name": "pvpv_cont",
+        "Traffic Type": "continuous",
+        "Deployment": "pvpv",
+        "Description": "Two VMs in parallel with Continuous Stream",
+        "biDirectional": "True",
+        "iLoad": "100",
+    },
     {
         "Name": "phy2phy_scalability",
         "Traffic Type": "rfc2544",
index cd2b8d2..abca63b 100644 (file)
@@ -60,13 +60,7 @@ SYS_MODULES = ['uio', 'cuse']
 VHOST_DEV_FILE = 'ovs-vhost-net'
 
 # location of vhost-user sockets
-VHOST_USER_SOCKS = ['/tmp/dpdkvhostuser0', '/tmp/dpdkvhostuser1',
-                    '/tmp/dpdkvhostuser2', '/tmp/dpdkvhostuser3',
-                    '/tmp/dpdkvhostuser4', '/tmp/dpdkvhostuser5',
-                    '/tmp/dpdkvhostuser6', '/tmp/dpdkvhostuser7',
-                    '/tmp/dpdkvhostuser8', '/tmp/dpdkvhostuser9',
-                    '/tmp/dpdkvhostuser10', '/tmp/dpdkvhostuser11',
-                    '/tmp/myport0', '/tmp/helloworld123', '/tmp/abcstuff0']
+VHOST_USER_SOCKS = os.path.join(OVS_VAR_DIR, 'dpdkvhostuser*')
 
 # ############################
 # vswitch configuration
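
The hardcoded socket list is replaced above by a wildcard path. A minimal
sketch of how such a pattern resolves to actual sockets (the glob call and
the OVS_VAR_DIR value are illustrative; the real lookup happens in the
vswitch code):

    import glob
    import os

    # assumed default; OVS_VAR_DIR is defined elsewhere in the configuration
    OVS_VAR_DIR = '/usr/local/var/run/openvswitch'
    VHOST_USER_SOCKS = os.path.join(OVS_VAR_DIR, 'dpdkvhostuser*')

    # lists whatever vhost-user sockets currently exist
    sockets = glob.glob(VHOST_USER_SOCKS)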
index 05893fb..2e86b35 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2016 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # ############################
 VNF_DIR = 'vnfs/'
 VNF = 'QemuDpdkVhostUser'
+VNF_AFFINITIZATION_ON = True
+
+# ############################
+# Executables and log files
+# ############################
+
+QEMU_BIN = os.path.join(QEMU_DIR, 'x86_64-softmmu/qemu-system-x86_64')
+
+# log file for qemu
+LOG_FILE_QEMU = 'qemu.log'
+
+# log file for all commands executed on guest(s)
+# multiple guests will result in log files with the guest number appended
+LOG_FILE_GUEST_CMDS = 'guest-cmds.log'
 
 # ############################
 # Guest configuration
 # ############################
+# All configuration options related to a particular VM instance are defined as
+# lists and prefixed with the `GUEST_` label. It is essential that there are
+# enough items in every `GUEST_` option to cover all VM instances involved in
+# the test. If there are not enough items, VSPERF will use the first item of
+# the particular `GUEST_` option to expand the list to the required length. The
+# first item can contain macros starting with `#` to generate VM specific
+# values. These macros can be used only for options of `list` or `str` types
+# with the `GUEST_` prefix.
+# The following macros are supported:
+#
+# * #VMINDEX - replaced by the index of the VM being executed; this macro is
+#   expanded first, so it can be used inside other macros.
+#
+# * #MAC(mac_address[, step]) - iterates the given `mac_address` with an
+#   optional `step`. If step is not defined, it defaults to 1. This means
+#   that the first VM will use the value of `mac_address`, the second VM the
+#   value of `mac_address` increased by `step`, etc.
+#
+# * #IP(ip_address[, step]) - iterates the given `ip_address` with an optional
+#   step. If step is not defined, it defaults to 1. This means that the first
+#   VM will use the value of `ip_address`, the second VM the value of
+#   `ip_address` increased by `step`, etc.
+#
+# * #EVAL(expression) - evaluates the given `expression` as Python code;
+#   only simple expressions should be used. Function calls are not
+#   supported.
 
 # directory which is shared to QEMU guests. Useful for exchanging files
 # between host and guest, VNF specific share will be created
 # For 2 VNFs you may use ['/tmp/qemu0_share', '/tmp/qemu1_share']
-GUEST_SHARE_DIR = ['/tmp/qemu0_share', '/tmp/qemu1_share', \
-                   '/tmp/qemu2_share', '/tmp/qemu3_share', \
-                   '/tmp/qemu4_share', '/tmp/qemu5_share']
+GUEST_SHARE_DIR = ['/tmp/qemu0_share']
 
 # location of guest disk image
 # For 2 VNFs you may use ['guest1.img', 'guest2.img']
-GUEST_IMAGE = ['', '', '', '', '', '']
+GUEST_IMAGE = ['']
 
 # guarding timer for VM start up
 # For 2 VNFs you may use [180, 180]
-GUEST_TIMEOUT = [180, 180, 180, 180, 180, 180]
+GUEST_TIMEOUT = [180]
 
 # Guest images may require different drive types such as ide to mount shared
 # locations and/or boot correctly. You can modify the types here.
-GUEST_BOOT_DRIVE_TYPE = 'scsi'
-GUEST_SHARED_DRIVE_TYPE = 'scsi'
+GUEST_BOOT_DRIVE_TYPE = ['scsi']
+GUEST_SHARED_DRIVE_TYPE = ['scsi']
 
 # packet forwarding mode supported by testpmd; Please see DPDK documentation
 # for comprehensive list of modes supported by your version.
@@ -57,71 +95,50 @@ GUEST_TESTPMD_FWD_MODE = 'csum'
 # This configuration option can be overridden by CLI SCALAR option
 # guest_loopback, e.g. --test-params "guest_loopback=l2fwd"
 # For 2 VNFs you may use ['testpmd', 'l2fwd']
-GUEST_LOOPBACK = ['testpmd', 'testpmd', \
-                  'testpmd', 'testpmd', \
-                  'testpmd', 'testpmd']
+GUEST_LOOPBACK = ['testpmd']
 
 # username for guest image
-GUEST_USERNAME = 'root'
+GUEST_USERNAME = ['root']
 
 # password for guest image
-GUEST_PASSWORD = 'root'
+GUEST_PASSWORD = ['root']
 
 # login username prompt for guest image
-GUEST_PROMPT_LOGIN = '.* login:'
+GUEST_PROMPT_LOGIN = ['.* login:']
 
 # login password prompt for guest image
-GUEST_PROMPT_PASSWORD = 'Password: '
+GUEST_PROMPT_PASSWORD = ['Password: ']
 
 # standard prompt for guest image
-GUEST_PROMPT = 'root.*#'
+GUEST_PROMPT = ['root.*#']
 
-# log file for qemu
-LOG_FILE_QEMU = 'qemu.log'
+# defines the number of NICs configured for each guest; it must be less than
+# or equal to the number of NICs configured in GUEST_NICS
+GUEST_NICS_NR = [2]
 
-# log file for all commands executed on guest(s)
-# multiple guests will result in log files with the guest number appended
-LOG_FILE_GUEST_CMDS = 'guest-cmds.log'
+# template for guests with 4 NICs; only GUEST_NICS_NR NICs will be configured at runtime
+GUEST_NICS = [[{'device' : 'eth0', 'mac' : '#MAC(00:00:00:00:00:01,2)', 'pci' : '00:04.0', 'ip' : '#IP(192.168.1.2,4)/24'},
+               {'device' : 'eth1', 'mac' : '#MAC(00:00:00:00:00:02,2)', 'pci' : '00:05.0', 'ip' : '#IP(192.168.1.3,4)/24'},
+               {'device' : 'eth2', 'mac' : '#MAC(cc:00:00:00:00:01,2)', 'pci' : '00:06.0', 'ip' : '#IP(192.168.1.4,4)/24'},
+               {'device' : 'eth3', 'mac' : '#MAC(cc:00:00:00:00:02,2)', 'pci' : '00:07.0', 'ip' : '#IP(192.168.1.5,4)/24'},
+             ]]
 
-# ############################
-# Executables
-# ############################
-
-QEMU_BIN = os.path.join(QEMU_DIR, 'x86_64-softmmu/qemu-system-x86_64')
-
-# For 2 VNFs you may use ['eth0', 'eth2']
-GUEST_NIC1_NAME = ['eth0', 'eth0', 'eth0', 'eth0', 'eth0', 'eth0']
-GUEST_NIC2_NAME = ['eth1', 'eth1', 'eth1', 'eth1', 'eth1', 'eth1']
-
-# For 2 VNFs you may use ['00:00:00:00:00:01', '00:00:00:00:00:03']
-GUEST_NET1_MAC = ['00:00:00:00:00:01', '00:00:00:00:00:03', \
-                  '00:00:00:00:00:05', '00:00:00:00:00:07', \
-                  '00:00:00:00:00:09', '00:00:00:00:00:0b']
-GUEST_NET2_MAC = ['00:00:00:00:00:02', '00:00:00:00:00:04', \
-                  '00:00:00:00:00:06', '00:00:00:00:00:08', \
-                  '00:00:00:00:00:0a', '00:00:00:00:00:0c']
-
-# For 2 VNFs you may use ['00:04.0', '00:04.0']
-GUEST_NET1_PCI_ADDRESS = ['00:04.0', '00:04.0', \
-                          '00:04.0', '00:04.0', \
-                          '00:04.0', '00:04.0']
-GUEST_NET2_PCI_ADDRESS = ['00:05.0', '00:05.0', \
-                          '00:05.0', '00:05.0', \
-                          '00:05.0', '00:05.0']
-
-GUEST_MEMORY = ['4096', '4096', '2048', '2048', '2048', '2048']
+# amount of host memory allocated for each guest
+GUEST_MEMORY = ['2048']
+# number of hugepages configured inside each guest
+GUEST_HUGEPAGES_NR = ['1024']
 
 # test-pmd requires 2 VM cores
-GUEST_SMP = ['2', '2', '2', '2', '2', '2']
+GUEST_SMP = ['2']
 
 # Host cores to use to affinitize the SMP cores of a QEMU instance
 # For 2 VNFs you may use [(4,5), (6, 7)]
-GUEST_CORE_BINDING = [(6, 7), (9, 10), (11, 12), (13, 14), (15, 16), (17, 18)]
+GUEST_CORE_BINDING = [('#EVAL(6+2*#VMINDEX)', '#EVAL(7+2*#VMINDEX)')]
 
 # Queues per NIC inside guest for multi-queue configuration, requires switch
 # multi-queue to be enabled for dpdk. Set to 0 for disabled. Can be enabled if
 # using Vanilla OVS without enabling switch multi-queue.
-GUEST_NIC_QUEUES = 0
+GUEST_NIC_QUEUES = [0]
 
 # Virtio-Net vhost thread CPU mapping. If using  vanilla OVS with virtio-net,
 # you can affinitize the vhost-net threads by enabling the below setting. There
@@ -131,22 +148,11 @@ GUEST_NIC_QUEUES = 0
 VSWITCH_VHOST_NET_AFFINITIZATION = False
 VSWITCH_VHOST_CPU_MAP = [4,5,8,11]
 
-GUEST_START_TIMEOUT = 120
-GUEST_OVS_DPDK_DIR = '/root/ovs_dpdk'
-OVS_DPDK_SHARE = '/mnt/ovs_dpdk_share'
-
-# Set the CPU mask for testpmd loopback. To bind to specific guest CPUs use -l
-# GUEST_TESTPMD_CPU_MASK = '-l 0,1'
-GUEST_TESTPMD_CPU_MASK = '-c 0x3'
-
-# Testpmd multi-core config. Leave at 0's for disabled. Will not enable unless
-# GUEST_NIC_QUEUES are > 0. For bi directional traffic NB_CORES must be equal
-# to (RXQ + TXQ).
-GUEST_TESTPMD_NB_CORES = 0
-GUEST_TESTPMD_TXQ = 0
-GUEST_TESTPMD_RXQ = 0
+GUEST_START_TIMEOUT = [120]
+GUEST_OVS_DPDK_DIR = ['/root/ovs_dpdk']
+GUEST_OVS_DPDK_SHARE = ['/mnt/ovs_dpdk_share']
 
-# IP addresses to use for Vanilla OVS PVP testing
+# IP addresses to use for Vanilla OVS PXP testing
 # Consider using RFC 2544/3330 recommended IP addresses for benchmark testing.
 # Network: 198.18.0.0/15
 # Netmask: 255.254.0.0
@@ -163,15 +169,25 @@ VANILLA_TGEN_PORT1_MAC = 'AA:BB:CC:DD:EE:FF'
 VANILLA_TGEN_PORT2_IP = '1.1.2.10'
 VANILLA_TGEN_PORT2_MAC = 'AA:BB:CC:DD:EE:F0'
 
-VANILLA_BRIDGE_IP = ['1.1.1.5/16', '1.1.1.6/16', \
-                     '1.1.1.7/16', '1.1.1.8/16', \
-                     '1.1.1.9/16', '1.1.1.10/16']
+GUEST_BRIDGE_IP = ['#IP(1.1.1.5)/16']
+
+# ############################
+# Guest TESTPMD configuration
+# ############################
+
+# packet forwarding mode supported by testpmd; Please see DPDK documentation
+# for comprehensive list of modes supported by your version.
+# e.g. io|mac|mac_retry|macswap|flowgen|rxonly|txonly|csum|icmpecho|...
+# Note: Option "mac_retry" has been changed to "mac retry" since DPDK v16.07
+GUEST_TESTPMD_FWD_MODE = ['csum']
 
-VANILLA_NIC1_IP_CIDR = ['192.168.1.2/24', '192.168.1.4/24', \
-                        '192.168.1.6/24', '192.168.1.8/24', \
-                        '192.168.1.10/24', '192.168.1.12/24']
-VANILLA_NIC2_IP_CIDR = ['192.168.1.3/24', '192.168.1.5/24', \
-                        '192.168.1.7/24', '192.168.1.9/24', \
-                        '192.168.1.11/24', '192.168.1.13/24']
+# Set the CPU mask for testpmd loopback. To bind to specific guest CPUs use -l
+# GUEST_TESTPMD_CPU_MASK = ['-l 0,1']
+GUEST_TESTPMD_CPU_MASK = ['-c 0x3']
 
-VNF_AFFINITIZATION_ON = True
+# Testpmd multi-core config. Leave at 0's for disabled. Will not enable unless
+# GUEST_NIC_QUEUES are > 0. For bi directional traffic NB_CORES must be equal
+# to (RXQ + TXQ).
+GUEST_TESTPMD_NB_CORES = [0]
+GUEST_TESTPMD_TXQ = [0]
+GUEST_TESTPMD_RXQ = [0]
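
To make the macros above concrete, a hand-worked expansion for 2 VMs
(following the macro rules documented at the top of this file):

    # GUEST_NICS, first NIC of each VM: '#MAC(...,2)' steps by 2 per VM,
    # '#IP(...,4)' steps by 4 per VM
    #   VM0 eth0: mac 00:00:00:00:00:01, ip 192.168.1.2/24
    #   VM1 eth0: mac 00:00:00:00:00:03, ip 192.168.1.6/24

    # GUEST_CORE_BINDING with '#EVAL(6+2*#VMINDEX)':
    #   VM0: ('6', '7')
    #   VM1: ('8', '9')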
index 4622823..88e8cec 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2016 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,22 @@ and any user provided settings file.
 
 import os
 import re
+import logging
 import pprint
+import ast
+import netaddr
+
+_LOGGER = logging.getLogger(__name__)
+
+# regex to parse configuration macros from 04_vnf.conf
+# it will select all patterns starting with # sign
+# and returns macro parameters and step
+# examples of valid macros:
+#   #VMINDEX
+#   #MAC(AA:BB:CC:DD:EE:FF) or #MAC(AA:BB:CC:DD:EE:FF,2)
+#   #IP(192.168.1.2) or #IP(192.168.1.2,2)
+#   #EVAL(2*#VMINDEX)
+_PARSE_PATTERN = r'(#[A-Z]+)(\(([^(),]+)(,([0-9]+))?\))?'
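
The capture groups of _PARSE_PATTERN are (macro name, parenthesised
arguments, parameter, optional step suffix, step); a quick illustration
using the macro examples listed above:

    import re

    re.findall(_PARSE_PATTERN, '#MAC(00:00:00:00:00:01,2)')
    # [('#MAC', '(00:00:00:00:00:01,2)', '00:00:00:00:00:01', ',2', '2')]

    re.findall(_PARSE_PATTERN, '#IP(192.168.1.2)')
    # [('#IP', '(192.168.1.2)', '192.168.1.2', '', '')]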
 
 class Settings(object):
     """Holding class for settings.
@@ -121,6 +136,64 @@ class Settings(object):
         for key in os.environ:
             setattr(self, key, os.environ[key])
 
+    def check_vm_settings(self, vm_number):
+        """
+        Check all VM related settings starting with the GUEST_ prefix.
+        If an option does not cover the defined number of VMs, then vsperf
+        will try to expand it automatically. Expansion is also performed
+        when the first list item contains a macro.
+        """
+        for key in self.__dict__:
+            if key.startswith('GUEST_'):
+                if (isinstance(self.__dict__[key], str) and
+                        self.__dict__[key].find('#') >= 0):
+                    self.__dict__[key] = [self.__dict__[key]]
+                    self._expand_vm_settings(key, 1)
+                    self.__dict__[key] = self.__dict__[key][0]
+
+                if isinstance(self.__dict__[key], list):
+                    if (len(self.__dict__[key]) < vm_number or
+                            str(self.__dict__[key][0]).find('#') >= 0):
+                        # expand configuration for all VMs
+                        self._expand_vm_settings(key, vm_number)
+
+    def _expand_vm_settings(self, key, vm_number):
+        """
+        Expand VM option with given key for given number of VMs
+        """
+        master_value = self.__dict__[key][0]
+        master_value_str = str(master_value)
+        if master_value_str.find('#') >= 0:
+            self.__dict__[key] = []
+            for vmindex in range(vm_number):
+                value = master_value_str.replace('#VMINDEX', str(vmindex))
+                for macro, args, param, _, step in re.findall(_PARSE_PATTERN, value):
+                    multi = int(step) if len(step) and int(step) else 1
+                    if macro == '#EVAL':
+                        tmp_result = str(eval(param))
+                    elif macro == '#MAC':
+                        mac_value = netaddr.EUI(param).value
+                        mac = netaddr.EUI(mac_value + vmindex * multi)
+                        mac.dialect = netaddr.mac_unix_expanded
+                        tmp_result = str(mac)
+                    elif macro == '#IP':
+                        ip_value = netaddr.IPAddress(param).value
+                        tmp_result = str(netaddr.IPAddress(ip_value + vmindex * multi))
+                    else:
+                        raise RuntimeError('Unknown configuration macro {} in {}'.format(macro, key))
+
+                    value = value.replace("{}{}".format(macro, args), tmp_result)
+
+                # retype value to original type if needed
+                if not isinstance(master_value, str):
+                    value = ast.literal_eval(value)
+                self.__dict__[key].append(value)
+        else:
+            for vmindex in range(len(self.__dict__[key]), vm_number):
+                self.__dict__[key].append(master_value)
+
+        _LOGGER.debug("Expanding option: %s = %s", key, self.__dict__[key])
+
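
A minimal usage sketch of the new expansion hook (values are illustrative;
setValue/getValue are the existing Settings accessors):

    from conf import settings

    settings.setValue('GUEST_MEMORY', ['2048'])
    settings.setValue('GUEST_SHARE_DIR', ['/tmp/qemu#VMINDEX_share'])

    settings.check_vm_settings(2)

    settings.getValue('GUEST_MEMORY')     # ['2048', '2048']
    settings.getValue('GUEST_SHARE_DIR')  # ['/tmp/qemu0_share', '/tmp/qemu1_share']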
     def __str__(self):
         """Provide settings as a human-readable string.
 
index 258b723..7f453bd 100644 (file)
@@ -18,8 +18,7 @@
 from core.traffic_controller_rfc2544 import TrafficControllerRFC2544
 from core.vswitch_controller_clean import VswitchControllerClean
 from core.vswitch_controller_p2p import VswitchControllerP2P
-from core.vswitch_controller_pvp import VswitchControllerPVP
-from core.vswitch_controller_pvvp import VswitchControllerPVVP
+from core.vswitch_controller_pxp import VswitchControllerPXP
 from core.vswitch_controller_op2p import VswitchControllerOP2P
 from core.vswitch_controller_ptunp import VswitchControllerPtunP
 from core.vnf_controller import VnfController
@@ -57,7 +56,7 @@ def create_vswitch(deployment_scenario, vswitch_class, traffic,
 
     The returned controller is configured with the given vSwitch class.
 
-    Deployment scenarios: 'p2p', 'pvp'
+    Deployment scenarios: e.g. 'p2p', 'pvp', 'pvpv12', etc.
 
     :param deployment_scenario: The deployment scenario name
     :param vswitch_class: Reference to vSwitch class to be used.
@@ -66,18 +65,22 @@ def create_vswitch(deployment_scenario, vswitch_class, traffic,
     :return: IVSwitchController for the deployment_scenario
     """
     deployment_scenario = deployment_scenario.lower()
-    if deployment_scenario.find("p2p") == 0:
+    if deployment_scenario.startswith("p2p"):
         return VswitchControllerP2P(vswitch_class, traffic)
-    elif deployment_scenario.find("pvp") >= 0:
-        return VswitchControllerPVP(vswitch_class, traffic)
-    elif deployment_scenario.find("pvvp") >= 0:
-        return VswitchControllerPVVP(vswitch_class, traffic)
-    elif deployment_scenario.find("op2p") >= 0:
+    elif deployment_scenario.startswith("pvp"):
+        return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
+    elif deployment_scenario.startswith("pvvp"):
+        return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
+    elif deployment_scenario.startswith("pvpv"):
+        return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
+    elif deployment_scenario.startswith("op2p"):
         return VswitchControllerOP2P(vswitch_class, traffic, tunnel_operation)
-    elif deployment_scenario.find("ptunp") >= 0:
+    elif deployment_scenario.startswith("ptunp"):
         return VswitchControllerPtunP(vswitch_class, traffic)
-    elif deployment_scenario.find("clean") >= 0:
+    elif deployment_scenario.startswith("clean"):
         return VswitchControllerClean(vswitch_class, traffic)
+    else:
+        raise RuntimeError("Unknown deployment scenario '{}'.".format(deployment_scenario))
 
 
 def create_vnf(deployment_scenario, vnf_class):
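
The prefix dispatch above maps every multi-VM scenario onto the single PXP
controller; an illustrative call pattern (arguments elided):

    create_vswitch('pvvp10', vswitch_class, traffic)  # PXP controller, 10 VMs in series
    create_vswitch('pvpv', vswitch_class, traffic)    # PXP controller, 2 VMs in parallel
    create_vswitch('foo', vswitch_class, traffic)     # raises RuntimeError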
index 8800cca..2970066 100644 (file)
@@ -16,6 +16,7 @@
 
 import logging
 import pexpect
+from conf import settings
 from vnfs.vnf.vnf import IVnf
 
 class VnfController(object):
@@ -25,13 +26,13 @@ class VnfController(object):
 
     Attributes:
         _vnf_class: A class object representing the VNF to be used.
-        _deployment_scenario: A string describing the scenario to set-up in the
+        _deployment: A string describing the scenario to set-up in the
             constructor.
         _vnfs: A list of vnfs controlled by the controller.
     """
 
-    def __init__(self, deployment_scenario, vnf_class):
-        """Sets up the VNF infrastructure for the PVP deployment scenario.
+    def __init__(self, deployment, vnf_class):
+        """Sets up the VNF infrastructure based on deployment scenario
 
         :param vnf_class: The VNF class to be used.
         """
@@ -41,17 +42,38 @@ class VnfController(object):
         # setup controller with requested number of VNFs
         self._logger = logging.getLogger(__name__)
         self._vnf_class = vnf_class
-        self._deployment_scenario = deployment_scenario.upper()
-        if self._deployment_scenario == 'P2P':
-            self._vnfs = []
-        elif self._deployment_scenario == 'PVP':
-            self._vnfs = [vnf_class()]
-        elif self._deployment_scenario == 'PVVP':
-            self._vnfs = [vnf_class(), vnf_class()]
-        elif self._deployment_scenario == 'OP2P':
-            self._vnfs = []
+        self._deployment = deployment.lower()
+        self._vnfs = []
+        if self._deployment == 'pvp':
+            vm_number = 1
+        elif (self._deployment.startswith('pvvp') or
+              self._deployment.startswith('pvpv')):
+            if len(self._deployment) > 4:
+                vm_number = int(self._deployment[4:])
+            else:
+                vm_number = 2
         else:
-            self._vnfs = []
+            raise RuntimeError('Deployment {} is not supported by '
+                               'VnfController.'.format(self._deployment))
+
+        if vm_number:
+            self._logger.debug('Check configuration for %s guests.', vm_number)
+            settings.check_vm_settings(vm_number)
+            # enforce that GUEST_NICS_NR is 1 or an even number of NICs
+            updated = False
+            nics_nr = settings.getValue('GUEST_NICS_NR')
+            for index in range(len(nics_nr)):
+                if nics_nr[index] > 1 and nics_nr[index] % 2:
+                    updated = True
+                    nics_nr[index] = int(nics_nr[index] / 2) * 2
+            if updated:
+                settings.setValue('GUEST_NICS_NR', nics_nr)
+                self._logger.warning('Odd number of NICs was detected. Configuration '
+                                     'was updated to GUEST_NICS_NR = %s',
+                                     settings.getValue('GUEST_NICS_NR'))
+
+            self._vnfs = [vnf_class() for _ in range(vm_number)]
+
         self._logger.debug('__init__ ' + str(len(self._vnfs)) +
                            ' VNF[s] with ' + ' '.join(map(str, self._vnfs)))
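
The NIC-count sanitization above rounds any odd count greater than one down
to the nearest even number; a quick sketch of the expression with
hypothetical values:

    for nics in (1, 2, 3, 5, 8):
        print(nics, '->', int(nics / 2) * 2 if nics > 1 else nics)
    # 1 -> 1, 2 -> 2, 3 -> 2, 5 -> 4, 8 -> 8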
 
@@ -62,6 +84,13 @@ class VnfController(object):
                            ' VNF[s] with ' + ' '.join(map(str, self._vnfs)))
         return self._vnfs
 
+    def get_vnfs_number(self):
+        """Returns a number of vnfs controlled by this controller.
+        """
+        self._logger.debug('get_vnfs_number ' + str(len(self._vnfs)) +
+                           ' VNF[s]')
+        return len(self._vnfs)
+
     def start(self):
         """Boots all VNFs set-up by __init__.
 
diff --git a/core/vswitch_controller_pvp.py b/core/vswitch_controller_pvp.py
deleted file mode 100644 (file)
index a4f6196..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2015 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VSwitch controller for Physical to VM to Physical deployment
-"""
-
-import logging
-
-from core.vswitch_controller import IVswitchController
-from vswitches.utils import add_ports_to_flow
-from conf import settings
-
-_FLOW_TEMPLATE = {
-    'idle_timeout': '0'
-}
-
-class VswitchControllerPVP(IVswitchController):
-    """VSwitch controller for PVP deployment scenario.
-
-    Attributes:
-        _vswitch_class: The vSwitch class to be used.
-        _vswitch: The vSwitch object controlled by this controller
-        _deployment_scenario: A string describing the scenario to set-up in the
-            constructor.
-    """
-    def __init__(self, vswitch_class, traffic):
-        """Initializes up the prerequisites for the PVP deployment scenario.
-
-        :vswitch_class: the vSwitch class to be used.
-        """
-        self._logger = logging.getLogger(__name__)
-        self._vswitch_class = vswitch_class
-        self._vswitch = vswitch_class()
-        self._deployment_scenario = "PVP"
-        self._traffic = traffic.copy()
-        self._logger.debug('Creation using ' + str(self._vswitch_class))
-
-    def setup(self):
-        """ Sets up the switch for pvp
-        """
-        self._logger.debug('Setup using ' + str(self._vswitch_class))
-
-        try:
-            self._vswitch.start()
-
-            bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
-            self._vswitch.add_switch(bridge)
-
-            (_, phy1_number) = self._vswitch.add_phy_port(bridge)
-            (_, phy2_number) = self._vswitch.add_phy_port(bridge)
-            (_, vport1_number) = self._vswitch.add_vport(bridge)
-            (_, vport2_number) = self._vswitch.add_vport(bridge)
-
-            self._vswitch.del_flow(bridge)
-
-            # configure flows according to the TC definition
-            flow_template = _FLOW_TEMPLATE.copy()
-            if self._traffic['flow_type'] == 'IP':
-                flow_template.update({'dl_type':'0x0800', 'nw_src':self._traffic['l3']['srcip'],
-                                      'nw_dst':self._traffic['l3']['dstip']})
-
-            flow1 = add_ports_to_flow(flow_template, phy1_number,
-                                      vport1_number)
-            flow2 = add_ports_to_flow(flow_template, vport2_number,
-                                      phy2_number)
-            self._vswitch.add_flow(bridge, flow1)
-            self._vswitch.add_flow(bridge, flow2)
-
-            if self._traffic['bidir'] == 'True':
-                flow3 = add_ports_to_flow(flow_template, phy2_number,
-                                          vport2_number)
-                flow4 = add_ports_to_flow(flow_template, vport1_number,
-                                          phy1_number)
-                self._vswitch.add_flow(bridge, flow3)
-                self._vswitch.add_flow(bridge, flow4)
-
-        except:
-            self._vswitch.stop()
-            raise
-
-    def stop(self):
-        """Tears down the switch created in setup().
-        """
-        self._logger.debug('Stop using ' + str(self._vswitch_class))
-        self._vswitch.stop()
-
-    def __enter__(self):
-        self.setup()
-
-    def __exit__(self, type_, value, traceback):
-        self.stop()
-
-    def get_vswitch(self):
-        """See IVswitchController for description
-        """
-        return self._vswitch
-
-    def get_ports_info(self):
-        """See IVswitchController for description
-        """
-        self._logger.debug('get_ports_info  using ' + str(self._vswitch_class))
-        return self._vswitch.get_ports(settings.getValue('VSWITCH_BRIDGE_NAME'))
-
-    def dump_vswitch_flows(self):
-        """See IVswitchController for description
-        """
-        self._vswitch.dump_flows(settings.getValue('VSWITCH_BRIDGE_NAME'))
diff --git a/core/vswitch_controller_pvvp.py b/core/vswitch_controller_pvvp.py
deleted file mode 100644 (file)
index 729aca3..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2015 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VSwitch controller for Physical to VM to Physical deployment
-"""
-
-import logging
-
-from core.vswitch_controller import IVswitchController
-from vswitches.utils import add_ports_to_flow
-from conf import settings
-
-_FLOW_TEMPLATE = {
-    'idle_timeout': '0'
-}
-
-class VswitchControllerPVVP(IVswitchController):
-    """VSwitch controller for PVVP deployment scenario.
-
-    Attributes:
-        _vswitch_class: The vSwitch class to be used.
-        _vswitch: The vSwitch object controlled by this controller
-        _deployment_scenario: A string describing the scenario to set-up in the
-            constructor.
-    """
-    def __init__(self, vswitch_class, traffic):
-        """Initializes up the prerequisites for the PVVP deployment scenario.
-
-        :vswitch_class: the vSwitch class to be used.
-        """
-        self._logger = logging.getLogger(__name__)
-        self._vswitch_class = vswitch_class
-        self._vswitch = vswitch_class()
-        self._deployment_scenario = "PVVP"
-        self._traffic = traffic.copy()
-        self._logger.debug('Creation using ' + str(self._vswitch_class))
-
-    def setup(self):
-        """ Sets up the switch for PVVP
-        """
-        self._logger.debug('Setup using ' + str(self._vswitch_class))
-
-        try:
-            self._vswitch.start()
-
-            bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
-            self._vswitch.add_switch(bridge)
-
-            (_, phy1_number) = self._vswitch.add_phy_port(bridge)
-            (_, phy2_number) = self._vswitch.add_phy_port(bridge)
-            (_, vport1_number) = self._vswitch.add_vport(bridge)
-            (_, vport2_number) = self._vswitch.add_vport(bridge)
-            (_, vport3_number) = self._vswitch.add_vport(bridge)
-            (_, vport4_number) = self._vswitch.add_vport(bridge)
-
-            self._vswitch.del_flow(bridge)
-
-            # configure flows according to the TC definition
-            flow_template = _FLOW_TEMPLATE.copy()
-            if self._traffic['flow_type'] == 'IP':
-                flow_template.update({'dl_type':'0x0800', 'nw_src':self._traffic['l3']['srcip'],
-                                      'nw_dst':self._traffic['l3']['dstip']})
-
-            flow1 = add_ports_to_flow(flow_template, phy1_number,
-                                      vport1_number)
-            flow2 = add_ports_to_flow(flow_template, vport2_number,
-                                      vport3_number)
-            flow3 = add_ports_to_flow(flow_template, vport4_number,
-                                      phy2_number)
-            self._vswitch.add_flow(bridge, flow1)
-            self._vswitch.add_flow(bridge, flow2)
-            self._vswitch.add_flow(bridge, flow3)
-
-            if self._traffic['bidir'] == 'True':
-                flow4 = add_ports_to_flow(flow_template, phy2_number,
-                                          vport4_number)
-                flow5 = add_ports_to_flow(flow_template, vport3_number,
-                                          vport2_number)
-                flow6 = add_ports_to_flow(flow_template, vport1_number,
-                                          phy1_number)
-                self._vswitch.add_flow(bridge, flow4)
-                self._vswitch.add_flow(bridge, flow5)
-                self._vswitch.add_flow(bridge, flow6)
-
-        except:
-            self._vswitch.stop()
-            raise
-
-    def stop(self):
-        """Tears down the switch created in setup().
-        """
-        self._logger.debug('Stop using ' + str(self._vswitch_class))
-        self._vswitch.stop()
-
-    def __enter__(self):
-        self.setup()
-
-    def __exit__(self, type_, value, traceback):
-        self.stop()
-
-    def get_vswitch(self):
-        """See IVswitchController for description
-        """
-        return self._vswitch
-
-    def get_ports_info(self):
-        """See IVswitchController for description
-        """
-        self._logger.debug('get_ports_info  using ' + str(self._vswitch_class))
-        return self._vswitch.get_ports(settings.getValue('VSWITCH_BRIDGE_NAME'))
-
-    def dump_vswitch_flows(self):
-        """See IVswitchController for description
-        """
-        self._vswitch.dump_flows(settings.getValue('VSWITCH_BRIDGE_NAME'))
diff --git a/core/vswitch_controller_pxp.py b/core/vswitch_controller_pxp.py
new file mode 100644 (file)
index 0000000..6f53b5a
--- /dev/null
@@ -0,0 +1,221 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VSwitch controller for multi VM scenarios with serial or parallel connection
+"""
+
+import logging
+import netaddr
+
+from core.vswitch_controller import IVswitchController
+from vswitches.utils import add_ports_to_flow
+from conf import settings
+
+_FLOW_TEMPLATE = {
+    'idle_timeout': '0'
+}
+
+_PROTO_TCP = 6
+_PROTO_UDP = 17
+
+class VswitchControllerPXP(IVswitchController):
+    """VSwitch controller for PXP deployment scenario.
+    """
+    def __init__(self, deployment, vswitch_class, traffic):
+        """Initializes up the prerequisites for the PXP deployment scenario.
+
+        :vswitch_class: the vSwitch class to be used.
+        :deployment: the deployment scenario to configure
+        :traffic: dictionary with detailed traffic definition
+        """
+        self._logger = logging.getLogger(__name__)
+        self._vswitch_class = vswitch_class
+        self._vswitch = vswitch_class()
+        self._pxp_topology = 'parallel' if deployment.startswith('pvpv') else 'serial'
+        if deployment == 'pvp':
+            self._pxp_vm_count = 1
+        elif deployment.startswith('pvvp') or deployment.startswith('pvpv'):
+            if len(deployment) > 4:
+                self._pxp_vm_count = int(deployment[4:])
+            else:
+                self._pxp_vm_count = 2
+        else:
+            raise RuntimeError('Unknown number of VMs involved in {} deployment.'.format(deployment))
+
+        self._deployment_scenario = deployment
+
+        self._traffic = traffic.copy()
+        self._bidir = True if self._traffic['bidir'] == 'True' else False
+        self._logger.debug('Creation using ' + str(self._vswitch_class))
+        self._bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
+
+    def setup(self):
+        """ Sets up the switch for PXP
+        """
+        self._logger.debug('Setup using ' + str(self._vswitch_class))
+
+        try:
+            self._vswitch.start()
+
+            self._vswitch.add_switch(self._bridge)
+
+            # create physical ports
+            (_, phy1_number) = self._vswitch.add_phy_port(self._bridge)
+            (_, phy2_number) = self._vswitch.add_phy_port(self._bridge)
+
+            # create VM ports
+            # initialize vport array to requested number of VMs
+            guest_nics = settings.getValue('GUEST_NICS_NR')
+            vm_ports = [[] for _ in range(self._pxp_vm_count)]
+            # create as many VM ports as requested by configuration, but configure
+            # only even number of NICs or just one
+            for vmindex in range(self._pxp_vm_count):
+                # just in case, enforce an even number of NICs or 1
+                nics_nr = int(guest_nics[vmindex] / 2) * 2 if guest_nics[vmindex] > 1 else 1
+                self._logger.debug('Create %s vports for %s. VM with index %s',
+                                   nics_nr, vmindex + 1, vmindex)
+                for _ in range(nics_nr):
+                    (_, vport) = self._vswitch.add_vport(self._bridge)
+                    vm_ports[vmindex].append(vport)
+
+            self._vswitch.del_flow(self._bridge)
+
+            # configure flows according to the TC definition
+            if self._pxp_topology == 'serial':
+                flow = _FLOW_TEMPLATE.copy()
+                if self._traffic['flow_type'] == 'IP':
+                    flow.update({'dl_type':'0x0800',
+                                 'nw_src':self._traffic['l3']['srcip'],
+                                 'nw_dst':self._traffic['l3']['dstip']})
+
+                # insert flows for phy ports first
+                # from 1st PHY to 1st vport of 1st VM
+                self._add_flow(flow,
+                               phy1_number,
+                               vm_ports[0][0],
+                               self._bidir)
+                # from last vport of last VM to 2nd phy
+                self._add_flow(flow,
+                               vm_ports[self._pxp_vm_count-1][-1],
+                               phy2_number,
+                               self._bidir)
+
+                # add serial connections among VMs and VM NIC pairs if needed;
+                # in case of multiple NIC pairs per VM, the pairs are chained
+                # first, before the flow to the next VM is created
+                for vmindex in range(self._pxp_vm_count):
+                    # connect a VM's NIC pairs in case of 4 or more NICs per VM
+                    connections = [(vm_ports[vmindex][2*(x+1)-1],
+                                    vm_ports[vmindex][2*(x+1)])
+                                   for x in range(int(len(vm_ports[vmindex])/2)-1)]
+                    for connection in connections:
+                        self._add_flow(flow,
+                                       connection[0],
+                                       connection[1],
+                                       self._bidir)
+                    # connect the last NIC to the next VM if there is one
+                    if self._pxp_vm_count > vmindex + 1:
+                        self._add_flow(flow,
+                                       vm_ports[vmindex][-1],
+                                       vm_ports[vmindex+1][0],
+                                       self._bidir)
+            else:
+                proto = _PROTO_TCP if self._traffic['l3']['proto'].lower() == 'tcp' else _PROTO_UDP
+                dst_mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value
+                dst_ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value
+                # initialize stream index; every NIC pair of every VM uses a unique stream
+                stream = 0
+                for vmindex in range(self._pxp_vm_count):
+                    # iterate through all of the VM's NIC pairs...
+                    if len(vm_ports[vmindex]) > 1:
+                        port_pairs = [(vm_ports[vmindex][2*x],
+                                       vm_ports[vmindex][2*x+1]) for x in range(int(len(vm_ports[vmindex])/2))]
+                    else:
+                        # ...or connect VM with just one NIC to both phy ports
+                        port_pairs = [(vm_ports[vmindex][0], vm_ports[vmindex][0])]
+
+                    for port_pair in port_pairs:
+                        flow_p = _FLOW_TEMPLATE.copy()
+                        flow_v = _FLOW_TEMPLATE.copy()
+
+                        # update flow based on trafficgen settings
+                        if self._traffic['stream_type'] == 'L2':
+                            tmp_mac = netaddr.EUI(dst_mac_value + stream)
+                            tmp_mac.dialect = netaddr.mac_unix_expanded
+                            flow_p.update({'dl_dst':tmp_mac})
+                        elif self._traffic['stream_type'] == 'L3':
+                            tmp_ip = netaddr.IPAddress(dst_ip_value + stream)
+                            flow_p.update({'dl_type':'0x800', 'nw_dst':tmp_ip})
+                        elif self._traffic['stream_type'] == 'L4':
+                            flow_p.update({'dl_type':'0x800', 'nw_proto':proto, 'tp_dst':stream})
+                        else:
+                            raise RuntimeError('Unknown stream_type {}'.format(self._traffic['stream_type']))
+
+                        # insert flow to dispatch traffic from physical ports
+                        # to VMs based on stream type; all traffic from VMs is
+                        # sent to physical ports to avoid issues with MAC swapping
+                        # and upper layer mods performed inside guests
+                        self._add_flow(flow_p, phy1_number, port_pair[0])
+                        self._add_flow(flow_v, port_pair[1], phy2_number)
+                        if self._bidir:
+                            self._add_flow(flow_p, phy2_number, port_pair[1])
+                            self._add_flow(flow_v, port_pair[0], phy1_number)
+
+                        # every NIC pair needs its own unique traffic stream
+                        stream += 1
+
+        except:
+            self._vswitch.stop()
+            raise
+
+    def stop(self):
+        """Tears down the switch created in setup().
+        """
+        self._logger.debug('Stop using ' + str(self._vswitch_class))
+        self._vswitch.stop()
+
+    def _add_flow(self, flow, port1, port2, reverse_flow=False):
+        """ Helper method to insert flow into the vSwitch
+        """
+        self._vswitch.add_flow(self._bridge,
+                               add_ports_to_flow(flow,
+                                                 port1,
+                                                 port2))
+        if reverse_flow:
+            self._vswitch.add_flow(self._bridge,
+                                   add_ports_to_flow(flow,
+                                                     port2,
+                                                     port1))
+
+    def __enter__(self):
+        self.setup()
+
+    def __exit__(self, type_, value, traceback):
+        self.stop()
+
+    def get_vswitch(self):
+        """See IVswitchController for description
+        """
+        return self._vswitch
+
+    def get_ports_info(self):
+        """See IVswitchController for description
+        """
+        self._logger.debug('get_ports_info  using ' + str(self._vswitch_class))
+        return self._vswitch.get_ports(self._bridge)
+
+    def dump_vswitch_flows(self):
+        """See IVswitchController for description
+        """
+        self._vswitch.dump_flows(self._bridge)
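
For the serial topology implemented above, the resulting flow wiring can be
traced by hand; e.g. for pvvp2 with GUEST_NICS_NR = [2, 2] (port labels are
illustrative):

    PHY1     -> VM0.nic0    # first phy port to first vport of first VM
    VM0.nic1 -> VM1.nic0    # serial connection between the two VMs
    VM1.nic1 -> PHY2        # last vport of last VM to second phy port
    # with bidirectional traffic, each flow above is mirrored in reverse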
index eeefe75..2f3faae 100755 (executable)
@@ -169,12 +169,12 @@ hugepage amounts to support running these configurations. It is recommended
 to configure 1GB hugepages as the pagesize.
 
 The amount of hugepages needed depends on your configuration files in vsperf.
-Each guest image requires 4096 MB by default according to the default settings
+Each guest image requires 2048 MB by default according to the default settings
 in the ``04_vnf.conf`` file.
 
 .. code:: bash
 
-    GUEST_MEMORY = ['4096', '4096']
+    GUEST_MEMORY = ['2048']
 
 The dpdk startup parameters also require an amount of hugepages depending on
 your configuration in the ``02_vswitch.conf`` file.
index e61b3ea..cdf9f31 100755 (executable)
@@ -34,7 +34,7 @@ List all the cli options:
 
    $ ./vsperf -h
 
-Run all tests that have ``tput`` in their name - ``p2p_tput``, ``pvp_tput`` etc.:
+Run all tests that have ``tput`` in their name - ``phy2phy_tput``, ``pvp_tput`` etc.:
 
 .. code-block:: console
 
@@ -100,12 +100,305 @@ The values in the file specified by ``--conf-file`` takes precedence over all
 the other configuration files and does not have to follow the naming
 convention.
 
+Configuration of GUEST options
+------------------------------
+
+VSPERF is able to set up scenarios involving a number of VMs in series or in parallel.
+All configuration options related to a particular VM instance are defined as
+lists and prefixed with the ``GUEST_`` label. It is essential that there are
+enough items in every ``GUEST_`` option to cover all VM instances involved in
+the test. If there are not enough items, VSPERF will use the first item of
+the particular ``GUEST_`` option to expand the list to the required length.
+
+Example of option expansion for 4 VMs:
+
+    .. code-block:: python
+
+       """
+       Original values:
+       """
+       GUEST_SMP = ['2']
+       GUEST_MEMORY = ['2048', '4096']
+
+       """
+       Values after automatic expansion:
+       """
+       GUEST_SMP = ['2', '2', '2', '2']
+       GUEST_MEMORY = ['2048', '4096', '2048', '2048']
+
+
+The first item can contain macros starting with ``#`` to generate VM specific values.
+These macros can be used only for options of ``list`` or ``str`` types with the
+``GUEST_`` prefix.
+
+Example of macros and their expansion for 2 VMs:
+
+    .. code-block:: python
+
+       """
+       Original values:
+       """
+       GUEST_SHARE_DIR = ['/tmp/qemu#VMINDEX_share']
+       GUEST_BRIDGE_IP = ['#IP(1.1.1.5)/16']
+
+       """
+       Values after automatic expansion:
+       """
+       GUEST_SHARE_DIR = ['/tmp/qemu0_share', '/tmp/qemu1_share']
+       GUEST_BRIDGE_IP = ['1.1.1.5/16', '1.1.1.6/16']
+
+Additional examples are available at ``04_vnf.conf``.
+
+Note: If a macro is detected in the first item of the list, then
+all other items are ignored and the list content is created automatically.
+
+Multiple macros can be used inside one configuration option definition, but macros
+cannot be nested inside other macros. The only exception is the macro ``#VMINDEX``,
+which is expanded first and thus can be used inside other macros.
+
+The following macros are supported:
+
+  * ``#VMINDEX`` - replaced by the index of the VM being executed; this macro
+    is expanded first, so it can be used inside other macros.
+
+    Example:
+
+    .. code-block:: python
+
+       GUEST_SHARE_DIR = ['/tmp/qemu#VMINDEX_share']
+
+  * ``#MAC(mac_address[, step])`` - iterates the given ``mac_address`` with an
+    optional ``step``. If step is not defined, it defaults to 1. This means
+    that the first VM will use the value of ``mac_address``, the second VM the
+    value of ``mac_address`` increased by ``step``, etc.
+
+    Example:
+
+    .. code-block:: python
+
+       GUEST_NICS = [[{'mac' : '#MAC(00:00:00:00:00:01,2)'}]]
+
+  * ``#IP(ip_address[, step])`` - iterates the given ``ip_address`` with an
+    optional ``step``. If step is not defined, it defaults to 1. This means
+    that the first VM will use the value of ``ip_address``, the second VM the
+    value of ``ip_address`` increased by ``step``, etc.
+
+    Example:
+
+    .. code-block:: python
+
+       GUEST_BRIDGE_IP = ['#IP(1.1.1.5)/16']
+
+  * ``#EVAL(expression)`` - evaluates the given ``expression`` as Python code;
+    only simple expressions should be used. Function calls are not supported.
+
+    Example:
+
+    .. code-block:: python
+
+       GUEST_CORE_BINDING = [('#EVAL(6+2*#VMINDEX)', '#EVAL(7+2*#VMINDEX)')]
 
 Other Configuration
 -------------------
 
 ``conf.settings`` also loads configuration from the command line and from the environment.
 
+PXP Deployment
+==============
+
+Every testcase uses one of the supported deployment scenarios to set up the test
+environment. The controller responsible for a given scenario configures flows in
+the vswitch to route traffic among the physical interfaces connected to the
+traffic generator and the virtual machines. VSPERF supports several deployments,
+including the PXP deployment, which can set up various scenarios with multiple VMs.
+
+These scenarios are realized by the VswitchControllerPXP class, which can configure
+and execute a given number of VMs in serial or parallel configurations. Every VM can
+be configured with just one interface or an even number of interfaces. If a VM has
+more than 2 interfaces, traffic is routed among its pairs of interfaces.
+
+Example of traffic routing for VM with 4 NICs in serial configuration:
+
+.. code-block:: console
+
+                 +------------------------------------------+
+                 |  VM with 4 NICs                          |
+                 |  +---------------+    +---------------+  |
+                 |  |  Application  |    |  Application  |  |
+                 |  +---------------+    +---------------+  |
+                 |      ^       |            ^       |      |
+                 |      |       v            |       v      |
+                 |  +---------------+    +---------------+  |
+                 |  | logical ports |    | logical ports |  |
+                 |  |   0       1   |    |   2       3   |  |
+                 +--+---------------+----+---------------+--+
+                        ^       :            ^       :
+                        |       |            |       |
+                        :       v            :       v
+        +-----------+---------------+----+---------------+----------+
+        | vSwitch   |   0       1   |    |   2       3   |          |
+        |           | logical ports |    | logical ports |          |
+        | previous  +---------------+    +---------------+   next   |
+        | VM or PHY     ^       |            ^       |     VM or PHY|
+        |   port   -----+       +------------+       +--->   port   |
+        +-----------------------------------------------------------+
+
+It is also possible to define a different number of interfaces for each VM to better
+simulate real scenarios.
+
+Example of traffic routing for 2 VMs in serial configuration, where 1st VM has
+4 NICs and 2nd VM 2 NICs:
+
+.. code-block:: console
+
+           +------------------------------------------+  +---------------------+
+           |  1st VM with 4 NICs                      |  |  2nd VM with 2 NICs |
+           |  +---------------+    +---------------+  |  |  +---------------+  |
+           |  |  Application  |    |  Application  |  |  |  |  Application  |  |
+           |  +---------------+    +---------------+  |  |  +---------------+  |
+           |      ^       |            ^       |      |  |      ^       |      |
+           |      |       v            |       v      |  |      |       v      |
+           |  +---------------+    +---------------+  |  |  +---------------+  |
+           |  | logical ports |    | logical ports |  |  |  | logical ports |  |
+           |  |   0       1   |    |   2       3   |  |  |  |   0       1   |  |
+           +--+---------------+----+---------------+--+  +--+---------------+--+
+                  ^       :            ^       :               ^       :
+                  |       |            |       |               |       |
+                  :       v            :       v               :       v
+  +-----------+---------------+----+---------------+-------+---------------+----------+
+  | vSwitch   |   0       1   |    |   2       3   |       |   4       5   |          |
+  |           | logical ports |    | logical ports |       | logical ports |          |
+  | previous  +---------------+    +---------------+       +---------------+   next   |
+  | VM or PHY     ^       |            ^       |               ^       |     VM or PHY|
+  |   port   -----+       +------------+       +---------------+       +---->  port   |
+  +-----------------------------------------------------------------------------------+
+
+The number of VMs involved in the test and the type of their connection is defined
+by deployment name as follows:
+
+  * ``pvvp[number]`` - configures a scenario with an optional ``number`` of VMs
+    connected in series. If ``number`` is not specified, then
+    2 VMs will be used.
+
+    Example of 2 VMs in a serial configuration:
+
+    .. code-block:: console
+
+       +----------------------+  +----------------------+
+       |   1st VM             |  |   2nd VM             |
+       |   +---------------+  |  |   +---------------+  |
+       |   |  Application  |  |  |   |  Application  |  |
+       |   +---------------+  |  |   +---------------+  |
+       |       ^       |      |  |       ^       |      |
+       |       |       v      |  |       |       v      |
+       |   +---------------+  |  |   +---------------+  |
+       |   | logical ports |  |  |   | logical ports |  |
+       |   |   0       1   |  |  |   |   0       1   |  |
+       +---+---------------+--+  +---+---------------+--+
+               ^       :                 ^       :
+               |       |                 |       |
+               :       v                 :       v
+       +---+---------------+---------+---------------+--+
+       |   |   0       1   |         |   3       4   |  |
+       |   | logical ports | vSwitch | logical ports |  |
+       |   +---------------+         +---------------+  |
+       |       ^       |                 ^       |      |
+       |       |       +-----------------+       v      |
+       |   +----------------------------------------+   |
+       |   |              physical ports            |   |
+       |   |      0                         1       |   |
+       +---+----------------------------------------+---+
+                  ^                         :
+                  |                         |
+                  :                         v
+       +------------------------------------------------+
+       |                                                |
+       |                traffic generator               |
+       |                                                |
+       +------------------------------------------------+
+
+  * ``pvpv[number]`` - configures a scenario with an optional ``number`` of VMs
+    connected in parallel. If ``number`` is not specified, then
+    2 VMs will be used. The multistream feature is used to route traffic to
+    particular VMs (or to the NIC pairs of every VM). This means that VSPERF
+    will enable the multistream feature and set the number of streams to the
+    number of VMs and their NIC pairs. Traffic will be dispatched based on
+    Stream Type, i.e. by UDP port, IP address or MAC address.
+
+    Example of 2 VMs in a parallel configuration, where traffic is dispatched
+    based on the UDP port:
+
+    .. code-block:: console
+
+       +----------------------+  +----------------------+
+       |   1st VM             |  |   2nd VM             |
+       |   +---------------+  |  |   +---------------+  |
+       |   |  Application  |  |  |   |  Application  |  |
+       |   +---------------+  |  |   +---------------+  |
+       |       ^       |      |  |       ^       |      |
+       |       |       v      |  |       |       v      |
+       |   +---------------+  |  |   +---------------+  |
+       |   | logical ports |  |  |   | logical ports |  |
+       |   |   0       1   |  |  |   |   0       1   |  |
+       +---+---------------+--+  +---+---------------+--+
+               ^       :                 ^       :
+               |       |                 |       |
+               :       v                 :       v
+       +---+---------------+---------+---------------+--+
+       |   |   0       1   |         |   3       4   |  |
+       |   | logical ports | vSwitch | logical ports |  |
+       |   +---------------+         +---------------+  |
+       |      ^         |                 ^       :     |
+       |      |     ......................:       :     |
+       |  UDP | UDP :   |                         :     |
+       |  port| port:   +--------------------+    :     |
+       |   0  |  1  :                        |    :     |
+       |      |     :                        v    v     |
+       |   +----------------------------------------+   |
+       |   |              physical ports            |   |
+       |   |    0                               1   |   |
+       +---+----------------------------------------+---+
+                ^                               :
+                |                               |
+                :                               v
+       +------------------------------------------------+
+       |                                                |
+       |                traffic generator               |
+       |                                                |
+       +------------------------------------------------+
+
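+    As a sketch of this calculation (it mirrors the logic added to
+    ``testcases/testcase.py``; the helper name is illustrative only), the
+    number of streams can be derived from ``GUEST_NICS_NR`` like this:
+
+    .. code-block:: python
+
+       def pvpv_stream_count(guest_nics_nr):
+           """Return the number of streams needed by VMs in parallel.
+
+           :param guest_nics_nr: NIC count of every VM, e.g. [2, 4]
+           """
+           streams = 0
+           for nics_nr in guest_nics_nr:
+               # every VM NIC pair needs a unique stream; a VM with
+               # a single NIC consumes one stream as well
+               streams += int(nics_nr / 2) if nics_nr > 1 else 1
+           return streams
+
+       # 1st VM with one NIC pair, 2nd VM with two NIC pairs => 3 streams
+       assert pvpv_stream_count([2, 4]) == 3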
+
+The PXP deployment is backward compatible with the PVP deployment: ``pvp`` is
+an alias for ``pvvp1`` and it executes just one VM.
+
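+A PXP deployment is selected by a testcase's ``Deployment`` attribute. For
+example, a testcase executing four VMs in series could be defined as follows
+(a sketch following the format of ``conf/01_testcases.conf``; the testcase
+name and description are illustrative):
+
+.. code-block:: python
+
+   {
+       "Name": "pvvp4_cont",
+       "Traffic Type": "continuous",
+       "Deployment": "pvvp4",
+       "Description": "Four VMs in series with Continuous Stream",
+       "biDirectional": "True",
+   },
+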
+The number of interfaces used by each VM is defined by the configuration
+option ``GUEST_NICS_NR``. In case that more than one pair of interfaces is
+defined for a VM, then:
+
+    * for the ``pvvp`` (serial) scenario, every NIC pair is connected in
+      series before the connection to the next VM is created
+    * for the ``pvpv`` (parallel) scenario, every NIC pair is directly
+      connected to the physical ports and a unique traffic stream is
+      assigned to it
+
+Examples:
+
+    * Deployment ``pvvp10`` will start 10 VMs and connect them in series
+    * Deployment ``pvpv4`` will start 4 VMs and connect them in parallel
+    * Deployment ``pvpv1`` with GUEST_NICS_NR = [4] will start 1 VM with
+      4 interfaces, where every NIC pair is directly connected to the
+      physical ports
+    * Deployment ``pvvp`` with GUEST_NICS_NR = [2, 4] will start 2 VMs;
+      the 1st VM will have 2 interfaces and the 2nd VM 4 interfaces. These
+      interfaces will be connected in series, i.e. traffic will flow as
+      follows: PHY1 -> VM1_1 -> VM1_2 -> VM2_1 -> VM2_2 -> VM2_3 -> VM2_4
+      -> PHY2
+
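+The last example above could be configured in a custom configuration file
+like this (a sketch; the values are illustrative):
+
+.. code-block:: python
+
+   # 2 VMs connected in series; 1st VM with 2 NICs, 2nd VM with 4 NICs
+   GUEST_NICS_NR = [2, 4]
+   # testpmd is required, because the 2nd VM has more than 2 NICs
+   GUEST_LOOPBACK = ['testpmd']
+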
+Note: In case that only 1 or more than 2 NICs are configured for a VM,
+``testpmd`` should be used as the forwarding application inside the VM,
+as it is able to forward traffic between multiple VM NIC pairs.
+
+Note: In case of ``linux_bridge``, all NICs are connected to the same
+bridge inside the VM.
+
 VM, vSwitch, Traffic Generator Independence
 ===========================================
 
@@ -198,7 +491,7 @@ and Forwarding Applications from other components.
 
 The controlled classes provide basic primitive operations. The Controllers
 sequence and co-ordinate these primitive operation in to useful actions. For
-instance the vswitch_controller_PVP can be used to bring any vSwitch (that
+instance the vswitch_controller_p2p can be used to bring any vSwitch (that
 implements the primitives defined in IVSwitch) into the configuration required
 by the Phy-to-Phy  Deployment Scenario.
 
index 4d2c6ba..aec5360 100755 (executable)
@@ -11,9 +11,9 @@ msc {
   vsperf => testcase [ label="run()" ];
   --- [ label = " skipping details of finding and creating correct subclasses of IVSwitch, ITrafficGenerator etc." ];
   testcase => vswitch_ctl [ label="create(vswitch_class" ];
-  vswitch_ctl note vswitch_ctl [ label="vswitch_ctl is instance of VswitchControllerPvp"];
+  vswitch_ctl note vswitch_ctl [ label="vswitch_ctl is instance of VswitchControllerPXP"];
   testcase => vnf_ctl [ label="create(vnf_class)" ];
-  vnf_ctl note vnf_ctl [ label="vnf_ctl is instance of VnfControllerPvp"];
+  vnf_ctl note vnf_ctl [ label="vnf_ctl is instance of VnfController"];
   testcase => traffic_ctl [ label="create()" ];
   traffic_ctl note traffic_ctl [ label="traffic_ctl is instance of TrafficControllerRFC2544"];
   |||;
index c55b5a2..ce647c6 100755 (executable)
@@ -85,6 +85,10 @@ contents. Any configuration item mentioned in any .conf file in
 ``./conf`` directory can be added and that item will be overridden by
 the custom configuration value.
 
+Further details about the evaluation of configuration files and the special
+behaviour of options with the ``GUEST_`` prefix can be found in the
+`design document
+<http://artifacts.opnfv.org/vswitchperf/docs/design/vswitchperf_design.html#configuration>`__.
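+
+For example, it is enough to define any ``GUEST_`` option with a single item;
+vsperf expands it automatically for all VMs involved in the test (a sketch;
+the value is illustrative):
+
+.. code-block:: python
+
+   # behaves like ['testpmd', 'testpmd', ...] for any number of VMs
+   GUEST_LOOPBACK = ['testpmd']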
+
 Using a custom settings file
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -105,11 +109,15 @@ described like so (1 = max priority):
 2. Environment variables
 3. Configuration file(s)
 
+Further details about the evaluation of configuration files and the special
+behaviour of options with the ``GUEST_`` prefix can be found in the
+`design document
+<http://artifacts.opnfv.org/vswitchperf/docs/design/vswitchperf_design.html#configuration>`__.
+
 vloop_vnf
 ^^^^^^^^^
 
-vsperf uses a VM called vloop_vnf for looping traffic in the PVP and PVVP
-deployment scenarios. The image can be downloaded from
+vsperf uses a VM image called vloop_vnf for looping traffic in the deployment
+scenarios involving VMs. The image can be downloaded from
 `<http://artifacts.opnfv.org/>`__.
 
 .. code-block:: console
@@ -226,8 +234,8 @@ set the ports.
     $ ./vsperf --vswitch OvsVanilla
 
 
-Executing PVP and PVVP tests
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Executing tests with VMs
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 To run tests using vhost-user as guest access method:
 
@@ -252,8 +260,8 @@ To run tests using vhost-user as guest access method:
 
      $ ./vsperf --conf-file=<path_to_custom_conf>/10_custom.conf
 
-Executing PVP tests using Vanilla OVS
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Executing tests with VMs using Vanilla OVS
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To run tests using Vanilla OVS:
 
@@ -391,15 +399,15 @@ deployment.
 
 .. _guest-loopback-application:
 
-Selection of loopback application for PVP and PVVP tests
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Selection of loopback application for tests with VMs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To select loopback application, which will perform traffic forwarding
 inside VM, following configuration parameter should be configured:
 
 .. code-block:: console
 
-     GUEST_LOOPBACK = ['testpmd', 'testpmd']
+     GUEST_LOOPBACK = ['testpmd']
 
 or use --test-param
 
@@ -419,9 +427,16 @@ Supported loopback applications are:
                        ensure traffic forwarding between its interfaces
 
 Guest loopback application must be configured, otherwise traffic
-will not be forwarded by VM and testcases with PVP and PVVP deployments
+will not be forwarded by VM and testcases with VM-related deployments
 will fail. Guest loopback application is set to 'testpmd' by default.
 
+Note: In case that only 1 or more than 2 NICs are configured for a VM,
+then 'testpmd' should be used, as it is able to forward traffic between
+multiple VM NIC pairs.
+
+Note: In case of linux_bridge, all guest NICs are connected to the same
+bridge inside the guest.
+
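+Since ``GUEST_LOOPBACK`` is a per-VM list, the loopback application can also
+differ among VMs; for instance (a sketch for a deployment with two VMs):
+
+.. code-block:: console
+
+     GUEST_LOOPBACK = ['testpmd', 'linux_bridge']
+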
 Multi-Queue Configuration
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -622,8 +637,8 @@ OVS with DPDK and QEMU
 
 If you encounter the following error: "before (last 100 chars):
 '-path=/dev/hugepages,share=on: unable to map backing store for
-hugepages: Cannot allocate memory\r\n\r\n" with the PVP or PVVP
-deployment scenario, check the amount of hugepages on your system:
+hugepages: Cannot allocate memory\r\n\r\n" during qemu initialization,
+check the amount of hugepages on your system:
 
 .. code-block:: console
 
index 477c1de..bd9bb9c 100644 (file)
@@ -25,8 +25,8 @@ import subprocess
 import logging
 import glob
 
-from tools import tasks
 from conf import settings
+from tools import tasks
 from tools.module_manager import ModuleManager
 
 _LOGGER = logging.getLogger(__name__)
@@ -150,7 +150,7 @@ def _remove_vhost_net():
 def _vhost_user_cleanup():
     """Remove files created by vhost-user tests.
     """
-    for sock in settings.getValue('VHOST_USER_SOCKS'):
+    for sock in glob.glob(settings.getValue('VHOST_USER_SOCKS')):
         if os.path.exists(sock):
             try:
                 tasks.run_task(['sudo', 'rm', sock],
index 0123b30..6e215b4 100644 (file)
@@ -57,7 +57,6 @@ class TestCase(object):
         self._loadgen = None
         self._output_file = None
         self._tc_results = None
-        self.guest_loopback = []
         self._settings_original = {}
         self._settings_paths_modified = False
         self._testcast_run_time = None
@@ -70,7 +69,8 @@ class TestCase(object):
         # update global settings
         guest_loopback = get_test_param('guest_loopback', None)
         if guest_loopback:
-            self._update_settings('GUEST_LOOPBACK', [guest_loopback for dummy in S.getValue('GUEST_LOOPBACK')])
+            # we can put just one item, it'll be expanded automatically for all VMs
+            self._update_settings('GUEST_LOOPBACK', [guest_loopback])
 
         if 'VSWITCH' in self._settings_original or 'VNF' in self._settings_original:
             self._settings_original.update({
@@ -112,12 +112,6 @@ class TestCase(object):
                 self._tunnel_type = get_test_param('tunnel_type',
                                                    self._tunnel_type)
 
-        # identify guest loopback method, so it can be added into reports
-        if self.deployment == 'pvp':
-            self.guest_loopback.append(S.getValue('GUEST_LOOPBACK')[0])
-        else:
-            self.guest_loopback = S.getValue('GUEST_LOOPBACK').copy()
-
         # read configuration of streams; CLI parameter takes precedence to
         # testcase definition
         multistream = cfg.get('MultiStream', TRAFFIC_DEFAULTS['multistream'])
@@ -153,13 +147,6 @@ class TestCase(object):
         # Packet Forwarding mode
         self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower()
 
-        # OVS Vanilla requires guest VM MAC address and IPs to work
-        if 'linux_bridge' in self.guest_loopback:
-            self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'),
-                                        'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')})
-            self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'),
-                                        'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')})
-
         # trafficgen configuration required for tests of tunneling protocols
         if self.deployment == "op2p":
             self._traffic['l2'].update({'srcmac':
@@ -192,13 +179,6 @@ class TestCase(object):
         # mount hugepages if needed
         self._mount_hugepages()
 
-        # verify enough hugepages are free to run the testcase
-        if not self._check_for_enough_hugepages():
-            raise RuntimeError('Not enough hugepages free to run test.')
-
-        # copy sources of l2 forwarding tools into VM shared dir if needed
-        self._copy_fwd_tools_for_all_guests()
-
         self._logger.debug("Controllers:")
         loader = Loader()
         self._traffic_ctl = component_factory.create_traffic(
@@ -209,6 +189,32 @@ class TestCase(object):
             self.deployment,
             loader.get_vnf_class())
 
+        # verify enough hugepages are free to run the testcase
+        if not self._check_for_enough_hugepages():
+            raise RuntimeError('Not enough hugepages free to run test.')
+
+        # perform guest related handling
+        if self._vnf_ctl.get_vnfs_number():
+            # copy sources of l2 forwarding tools into VM shared dir if needed
+            self._copy_fwd_tools_for_all_guests()
+
+            # in case of VMs in parallel, set the number of streams to the
+            # number of NIC pairs of all VMs involved in the test
+            if self.deployment.startswith('pvpv'):
+                # for each VM NIC pair we need a unique stream
+                streams = 0
+                for vm_nic in S.getValue('GUEST_NICS_NR')[:self._vnf_ctl.get_vnfs_number()]:
+                    streams += int(vm_nic / 2) if vm_nic > 1 else 1
+                self._logger.debug("VMs with parallel connection were detected. "
+                                   "Thus Number of streams was set to %s", streams)
+                self._traffic.update({'multistream': streams})
+
+            # OVS Vanilla requires guest VM MAC address and IPs to work
+            if 'linux_bridge' in S.getValue('GUEST_LOOPBACK'):
+                self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'),
+                                            'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')})
+                self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'),
+                                            'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')})
+
         if self._vswitch_none:
             self._vswitch_ctl = component_factory.create_pktfwd(
                 self.deployment,
@@ -350,8 +356,8 @@ class TestCase(object):
                 item[ResultsConstants.SCAL_STREAM_COUNT] = self._traffic['multistream']
                 item[ResultsConstants.SCAL_STREAM_TYPE] = self._traffic['stream_type']
                 item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows']
-            if self.deployment in ['pvp', 'pvvp'] and len(self.guest_loopback):
-                item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(self.guest_loopback)
+            if self._vnf_ctl.get_vnfs_number():
+                item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(S.getValue('GUEST_LOOPBACK'))
             if self._tunnel_type:
                 item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type
         return results
@@ -359,19 +365,15 @@ class TestCase(object):
     def _copy_fwd_tools_for_all_guests(self):
         """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment.
         """
-        # data are copied only for pvp and pvvp, so let's count number of 'v'
-        counter = 1
-        while counter <= self.deployment.count('v'):
-            self._copy_fwd_tools_for_guest(counter)
-            counter += 1
+        # consider only VNFs involved in the test
+        for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:self._vnf_ctl.get_vnfs_number()]):
+            self._copy_fwd_tools_for_guest(guest_dir)
 
-    def _copy_fwd_tools_for_guest(self, index):
+    def _copy_fwd_tools_for_guest(self, guest_dir):
         """Copy dpdk and l2fwd code to GUEST_SHARE_DIR of VM
 
-        :param index: Index of VM starting from 1 (i.e. 1st VM has index 1)
+        :param guest_dir: Guest share directory, into which the forwarding
+            tools will be copied
         """
-        guest_dir = S.getValue('GUEST_SHARE_DIR')[index-1]
-
         # remove shared dir if it exists to avoid issues with file consistency
         if os.path.exists(guest_dir):
             tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger,
@@ -381,7 +383,8 @@ class TestCase(object):
         os.makedirs(guest_dir)
 
         # copy sources into shared dir only if neccessary
-        if 'testpmd' in self.guest_loopback or 'l2fwd' in self.guest_loopback:
+        guest_loopback = set(S.getValue('GUEST_LOOPBACK'))
+        if 'testpmd' in guest_loopback or 'l2fwd' in guest_loopback:
             try:
                 tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"',
                                 os.path.join(S.getValue('RTE_SDK_USER'), ''),
@@ -423,8 +426,8 @@ class TestCase(object):
         """
         hugepages_needed = 0
         hugepage_size = hugepages.get_hugepage_size()
-        # get hugepage amounts per guest
-        for guest in range(self.deployment.count('v')):
+        # get hugepage amounts per guest involved in the test
+        for guest in range(self._vnf_ctl.get_vnfs_number()):
             hugepages_needed += math.ceil((int(S.getValue(
                 'GUEST_MEMORY')[guest]) * 1000) / hugepage_size)
 
@@ -436,7 +439,7 @@ class TestCase(object):
             from vswitches import ovs_dpdk_vhost
             if ovs_dpdk_vhost.OvsDpdkVhost.old_dpdk_config():
                 match = re.search(
-                    '-socket-mem\s+(\d+),(\d+)',
+                    r'-socket-mem\s+(\d+),(\d+)',
                     ''.join(S.getValue('VSWITCHD_DPDK_ARGS')))
                 if match:
                     sock0_mem, sock1_mem = (int(match.group(1)) * 1024 / hugepage_size,
index dd9ad81..a9343fd 100644 (file)
@@ -32,7 +32,7 @@ class IVnfQemu(IVnf):
     Abstract class for controling an instance of QEMU
     """
     _cmd = None
-    _expect = S.getValue('GUEST_PROMPT_LOGIN')
+    _expect = None
     _proc_name = 'qemu'
 
     class GuestCommandFilter(logging.Filter):
@@ -47,28 +47,32 @@ class IVnfQemu(IVnf):
         Initialisation function.
         """
         super(IVnfQemu, self).__init__()
+
+        self._expect = S.getValue('GUEST_PROMPT_LOGIN')[self._number]
         self._logger = logging.getLogger(__name__)
         self._logfile = os.path.join(
             S.getValue('LOG_DIR'),
             S.getValue('LOG_FILE_QEMU')) + str(self._number)
         self._timeout = S.getValue('GUEST_TIMEOUT')[self._number]
         self._monitor = '%s/vm%dmonitor' % ('/tmp', self._number)
-        self._net1 = get_test_param('guest_nic1_name', None)
-        if self._net1 == None:
-            self._net1 = S.getValue('GUEST_NIC1_NAME')[self._number]
-        else:
-            self._net1 = self._net1.split(',')[self._number]
-        self._net2 = get_test_param('guest_nic2_name', None)
-        if self._net2 == None:
-            self._net2 = S.getValue('GUEST_NIC2_NAME')[self._number]
-        else:
-            self._net2 = self._net2.split(',')[self._number]
+        # read the GUEST NICs configuration and use only the defined number of NICs
+        nics_nr = S.getValue('GUEST_NICS_NR')[self._number]
+        # and inform the user about misconfiguration
+        if nics_nr < 1:
+            raise RuntimeError('At least one VM NIC is mandatory, but {} '
+                               'NICs are configured'.format(nics_nr))
+        elif nics_nr > 1 and nics_nr % 2:
+            nics_nr = int(nics_nr / 2) * 2
+            self._logger.warning('Odd number of NICs is configured, only '
+                                 '%s NICs will be used', nics_nr)
+
+        self._nics = S.getValue('GUEST_NICS')[self._number][:nics_nr]
 
         # set guest loopback application based on VNF configuration
         # cli option take precedence to config file values
         self._guest_loopback = S.getValue('GUEST_LOOPBACK')[self._number]
 
-        self._testpmd_fwd_mode = S.getValue('GUEST_TESTPMD_FWD_MODE')
+        self._testpmd_fwd_mode = S.getValue('GUEST_TESTPMD_FWD_MODE')[self._number]
         # in case of SRIOV we must ensure, that MAC addresses are not swapped
         if S.getValue('SRIOV_ENABLED') and self._testpmd_fwd_mode.startswith('mac') and \
            not S.getValue('VNF').endswith('PciPassthrough'):
@@ -86,7 +90,7 @@ class IVnfQemu(IVnf):
                      '-smp', str(S.getValue('GUEST_SMP')[self._number]),
                      '-cpu', 'host,migratable=off',
                      '-drive', 'if={},file='.format(S.getValue(
-                        'GUEST_BOOT_DRIVE_TYPE')) +
+                         'GUEST_BOOT_DRIVE_TYPE')[self._number]) +
                      S.getValue('GUEST_IMAGE')[self._number],
                      '-boot', 'c', '--enable-kvm',
                      '-monitor', 'unix:%s,server,nowait' % self._monitor,
@@ -99,7 +103,7 @@ class IVnfQemu(IVnf):
                      '-snapshot', '-net none', '-no-reboot',
                      '-drive',
                      'if=%s,format=raw,file=fat:rw:%s,snapshot=off' %
-                     (S.getValue('GUEST_SHARED_DRIVE_TYPE'),
+                     (S.getValue('GUEST_SHARED_DRIVE_TYPE')[self._number],
                       S.getValue('GUEST_SHARE_DIR')[self._number]),
                     ]
         self._configure_logging()
@@ -181,11 +185,11 @@ class IVnfQemu(IVnf):
         if not self._timeout:
             self._expect_process(timeout=timeout)
 
-        self._child.sendline(S.getValue('GUEST_USERNAME'))
-        self._child.expect(S.getValue('GUEST_PROMPT_PASSWORD'), timeout=5)
-        self._child.sendline(S.getValue('GUEST_PASSWORD'))
+        self._child.sendline(S.getValue('GUEST_USERNAME')[self._number])
+        self._child.expect(S.getValue('GUEST_PROMPT_PASSWORD')[self._number], timeout=5)
+        self._child.sendline(S.getValue('GUEST_PASSWORD')[self._number])
 
-        self._expect_process(S.getValue('GUEST_PROMPT'), timeout=5)
+        self._expect_process(S.getValue('GUEST_PROMPT')[self._number], timeout=5)
 
     def send_and_pass(self, cmd, timeout=30):
         """
@@ -197,10 +201,10 @@ class IVnfQemu(IVnf):
         :returns: None
         """
         self.execute(cmd)
-        self.wait(S.getValue('GUEST_PROMPT'), timeout=timeout)
+        self.wait(S.getValue('GUEST_PROMPT')[self._number], timeout=timeout)
         self.execute('echo $?')
         self._child.expect('^0$', timeout=1)  # expect a 0
-        self.wait(S.getValue('GUEST_PROMPT'), timeout=timeout)
+        self.wait(S.getValue('GUEST_PROMPT')[self._number], timeout=timeout)
 
     def _affinitize(self):
         """
@@ -285,11 +289,14 @@ class IVnfQemu(IVnf):
             self._logger.error('Unsupported guest loopback method "%s" was specified. Option'
                                ' "buildin" will be used as a fallback.', self._guest_loopback)
 
-    def wait(self, prompt=S.getValue('GUEST_PROMPT'), timeout=30):
+    def wait(self, prompt=None, timeout=30):
+        if prompt is None:
+            prompt = S.getValue('GUEST_PROMPT')[self._number]
         super(IVnfQemu, self).wait(prompt=prompt, timeout=timeout)
 
-    def execute_and_wait(self, cmd, timeout=30,
-                         prompt=S.getValue('GUEST_PROMPT')):
+    def execute_and_wait(self, cmd, timeout=30, prompt=None):
+        if prompt is None:
+            prompt = S.getValue('GUEST_PROMPT')[self._number]
         super(IVnfQemu, self).execute_and_wait(cmd, timeout=timeout,
                                                prompt=prompt)
 
@@ -305,13 +312,13 @@ class IVnfQemu(IVnf):
         """
         # mount shared directory
         self.execute_and_wait('umount /dev/sdb1')
-        self.execute_and_wait('rm -rf ' + S.getValue('GUEST_OVS_DPDK_DIR'))
-        self.execute_and_wait('mkdir -p ' + S.getValue('OVS_DPDK_SHARE'))
+        self.execute_and_wait('rm -rf ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number])
+        self.execute_and_wait('mkdir -p ' + S.getValue('GUEST_OVS_DPDK_SHARE')[self._number])
         self.execute_and_wait('mount -o ro,iocharset=utf8 /dev/sdb1 ' +
-                              S.getValue('OVS_DPDK_SHARE'))
-        self.execute_and_wait('mkdir -p ' + S.getValue('GUEST_OVS_DPDK_DIR'))
-        self.execute_and_wait('cp -r ' + os.path.join(S.getValue('OVS_DPDK_SHARE'), dirname) +
-                              ' ' + S.getValue('GUEST_OVS_DPDK_DIR'))
+                              S.getValue('GUEST_OVS_DPDK_SHARE')[self._number])
+        self.execute_and_wait('mkdir -p ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number])
+        self.execute_and_wait('cp -r ' + os.path.join(S.getValue('GUEST_OVS_DPDK_SHARE')[self._number], dirname) +
+                              ' ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number])
         self.execute_and_wait('umount /dev/sdb1')
 
     def _configure_disable_firewall(self):
@@ -344,7 +351,7 @@ class IVnfQemu(IVnf):
 
         # Guest images _should_ have 1024 hugepages by default,
         # but just in case:'''
-        self.execute_and_wait('sysctl vm.nr_hugepages=1024')
+        self.execute_and_wait('sysctl vm.nr_hugepages={}'.format(S.getValue('GUEST_HUGEPAGES_NR')[self._number]))
 
         # Mount hugepages
         self.execute_and_wait('mkdir -p /dev/hugepages')
@@ -352,19 +359,19 @@ class IVnfQemu(IVnf):
             'mount -t hugetlbfs hugetlbfs /dev/hugepages')
 
         # build and configure system for dpdk
-        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR') +
+        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number] +
                               '/DPDK')
         self.execute_and_wait('export CC=gcc')
         self.execute_and_wait('export RTE_SDK=' +
-                              S.getValue('GUEST_OVS_DPDK_DIR') + '/DPDK')
+                              S.getValue('GUEST_OVS_DPDK_DIR')[self._number] + '/DPDK')
         self.execute_and_wait('export RTE_TARGET=%s' % S.getValue('RTE_TARGET'))
 
         # modify makefile if needed
         self._modify_dpdk_makefile()
 
         # disable network interfaces, so DPDK can take care of them
-        self.execute_and_wait('ifdown ' + self._net1)
-        self.execute_and_wait('ifdown ' + self._net2)
+        for nic in self._nics:
+            self.execute_and_wait('ifdown ' + nic['device'])
 
         # build and insert igb_uio and rebind interfaces to it
         self.execute_and_wait('make RTE_OUTPUT=$RTE_SDK/$RTE_TARGET -C '
@@ -373,35 +380,30 @@ class IVnfQemu(IVnf):
         self.execute_and_wait('insmod %s/kmod/igb_uio.ko' %
                               S.getValue('RTE_TARGET'))
         self.execute_and_wait('./tools/dpdk*bind.py --status')
-        self.execute_and_wait(
-            './tools/dpdk*bind.py -u' ' ' +
-            S.getValue('GUEST_NET1_PCI_ADDRESS')[self._number] + ' ' +
-            S.getValue('GUEST_NET2_PCI_ADDRESS')[self._number])
-        self.execute_and_wait(
-            './tools/dpdk*bind.py -b igb_uio' ' ' +
-            S.getValue('GUEST_NET1_PCI_ADDRESS')[self._number] + ' ' +
-            S.getValue('GUEST_NET2_PCI_ADDRESS')[self._number])
+        pci_list = ' '.join([nic['pci'] for nic in self._nics])
+        self.execute_and_wait('./tools/dpdk*bind.py -u ' + pci_list)
+        self.execute_and_wait('./tools/dpdk*bind.py -b igb_uio ' + pci_list)
         self.execute_and_wait('./tools/dpdk*bind.py --status')
 
         # build and run 'test-pmd'
-        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR') +
+        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number] +
                               '/DPDK/app/test-pmd')
         self.execute_and_wait('make clean')
         self.execute_and_wait('make')
-        if int(S.getValue('GUEST_NIC_QUEUES')):
+        if int(S.getValue('GUEST_NIC_QUEUES')[self._number]):
             self.execute_and_wait(
                 './testpmd {} -n4 --socket-mem 512 --'.format(
-                    S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+                    S.getValue('GUEST_TESTPMD_CPU_MASK')[self._number]) +
                 ' --burst=64 -i --txqflags=0xf00 ' +
                 '--nb-cores={} --rxq={} --txq={} '.format(
-                    S.getValue('GUEST_TESTPMD_NB_CORES'),
-                    S.getValue('GUEST_TESTPMD_TXQ'),
-                    S.getValue('GUEST_TESTPMD_RXQ')) +
+                    S.getValue('GUEST_TESTPMD_NB_CORES')[self._number],
+                    S.getValue('GUEST_TESTPMD_TXQ')[self._number],
+                    S.getValue('GUEST_TESTPMD_RXQ')[self._number]) +
                 '--disable-hw-vlan', 60, "Done")
         else:
             self.execute_and_wait(
                 './testpmd {} -n 4 --socket-mem 512 --'.format(
-                    S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+                    S.getValue('GUEST_TESTPMD_CPU_MASK')[self._number]) +
                 ' --burst=64 -i --txqflags=0xf00 ' +
                 '--disable-hw-vlan', 60, "Done")
         self.execute('set fwd ' + self._testpmd_fwd_mode, 1)
@@ -412,44 +414,51 @@ class IVnfQemu(IVnf):
         """
         Configure VM to perform L2 forwarding between NICs by l2fwd module
         """
-        if int(S.getValue('GUEST_NIC_QUEUES')):
+        if int(S.getValue('GUEST_NIC_QUEUES')[self._number]):
             self._set_multi_queue_nic()
         self._configure_copy_sources('l2fwd')
         self._configure_disable_firewall()
 
+        # configure all interfaces
+        for nic in self._nics:
+            self.execute('ip addr add ' +
+                         nic['ip'] + ' dev ' + nic['device'])
+            self.execute('ip link set dev ' + nic['device'] + ' up')
+
         # build and configure system for l2fwd
-        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR') +
+        self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number] +
                               '/l2fwd')
         self.execute_and_wait('export CC=gcc')
 
         self.execute_and_wait('make')
-        self.execute_and_wait('insmod ' + S.getValue('GUEST_OVS_DPDK_DIR') +
-                              '/l2fwd' + '/l2fwd.ko net1=' + self._net1 +
-                              ' net2=' + self._net2)
+        if len(self._nics) == 2:
+            self.execute_and_wait('insmod ' + S.getValue('GUEST_OVS_DPDK_DIR')[self._number] +
+                                  '/l2fwd' + '/l2fwd.ko net1=' + self._nics[0]['device'] +
+                                  ' net2=' + self._nics[1]['device'])
+        else:
+            raise RuntimeError('l2fwd can forward only between 2 NICs, but {} NICs are '
+                               'configured inside GUEST'.format(len(self._nics)))
 
     def _configure_linux_bridge(self):
         """
         Configure VM to perform L2 forwarding between NICs by linux bridge
         """
-        if int(S.getValue('GUEST_NIC_QUEUES')):
+        if int(S.getValue('GUEST_NIC_QUEUES')[self._number]):
             self._set_multi_queue_nic()
         self._configure_disable_firewall()
 
-        self.execute('ip addr add ' +
-                     S.getValue('VANILLA_NIC1_IP_CIDR')[self._number] +
-                     ' dev ' + self._net1)
-        self.execute('ip link set dev ' + self._net1 + ' up')
-
-        self.execute('ip addr add ' +
-                     S.getValue('VANILLA_NIC2_IP_CIDR')[self._number] +
-                     ' dev ' + self._net2)
-        self.execute('ip link set dev ' + self._net2 + ' up')
-
         # configure linux bridge
         self.execute('brctl addbr br0')
-        self.execute('brctl addif br0 ' + self._net1 + ' ' + self._net2)
+
+        # add all NICs into the bridge
+        for nic in self._nics:
+            self.execute('ip addr add ' +
+                         nic['ip'] + ' dev ' + nic['device'])
+            self.execute('ip link set dev ' + nic['device'] + ' up')
+            self.execute('brctl addif br0 ' + nic['device'])
+
         self.execute('ip addr add ' +
-                     S.getValue('VANILLA_BRIDGE_IP')[self._number] +
+                     S.getValue('GUEST_BRIDGE_IP')[self._number] +
                      ' dev br0')
         self.execute('ip link set dev br0 up')
 
@@ -475,17 +484,15 @@ class IVnfQemu(IVnf):
         # Controls source route verification
         # 0 means no source validation
         self.execute('sysctl -w net.ipv4.conf.all.rp_filter=0')
-        self.execute('sysctl -w net.ipv4.conf.' + self._net1 + '.rp_filter=0')
-        self.execute('sysctl -w net.ipv4.conf.' + self._net2 + '.rp_filter=0')
+        for nic in self._nics:
+            self.execute('sysctl -w net.ipv4.conf.' + nic['device'] + '.rp_filter=0')
 
     def _set_multi_queue_nic(self):
         """
-        Enable multi-queue in guest kernel with ethool.
+        Enable multi-queue in guest kernel with ethtool.
         :return: None
         """
-        self.execute_and_wait('ethtool -L {} combined {}'.format(
-            self._net1, S.getValue('GUEST_NIC_QUEUES')))
-        self.execute_and_wait('ethtool -l {}'.format(self._net1))
-        self.execute_and_wait('ethtool -L {} combined {}'.format(
-            self._net2, S.getValue('GUEST_NIC_QUEUES')))
-        self.execute_and_wait('ethtool -l {}'.format(self._net2))
+        for nic in self._nics:
+            self.execute_and_wait('ethtool -L {} combined {}'.format(
+                nic['device'], S.getValue('GUEST_NIC_QUEUES')[self._number]))
+            self.execute_and_wait('ethtool -l {}'.format(nic['device']))
index 4913142..fc46aba 100644 (file)
@@ -31,45 +31,36 @@ class QemuDpdkVhostUser(IVnfQemu):
         super(QemuDpdkVhostUser, self).__init__()
         self._logger = logging.getLogger(__name__)
 
-        # calculate indexes of guest devices (e.g. charx, dpdkvhostuserx)
-        i = self._number * 2
-        if1 = str(i)
-        if2 = str(i + 1)
-        net1 = 'net' + str(i + 1)
-        net2 = 'net' + str(i + 2)
-
         # multi-queue values
-        if int(S.getValue('GUEST_NIC_QUEUES')):
-            queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES'))
+        if int(S.getValue('GUEST_NIC_QUEUES')[self._number]):
+            queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES')[self._number])
             mq_vector_str = ',mq=on,vectors={}'.format(
-                int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2)
+                int(S.getValue('GUEST_NIC_QUEUES')[self._number]) * 2 + 2)
         else:
             queue_str, mq_vector_str = '', ''
 
-        self._cmd += ['-chardev',
-                      'socket,id=char' + if1 +
-                      ',path=' + S.getValue('OVS_VAR_DIR') +
-                      'dpdkvhostuser' + if1,
-                      '-chardev',
-                      'socket,id=char' + if2 +
-                      ',path=' + S.getValue('OVS_VAR_DIR') +
-                      'dpdkvhostuser' + if2,
-                      '-netdev',
-                      'type=vhost-user,id=' + net1 +
-                      ',chardev=char' + if1 + ',vhostforce' + queue_str,
-                      '-device',
-                      'virtio-net-pci,mac=' +
-                      S.getValue('GUEST_NET1_MAC')[self._number] +
-                      ',netdev=' + net1 + ',csum=off,gso=off,' +
-                      'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
-                      mq_vector_str,
-                      '-netdev',
-                      'type=vhost-user,id=' + net2 +
-                      ',chardev=char' + if2 + ',vhostforce' + queue_str,
-                      '-device',
-                      'virtio-net-pci,mac=' +
-                      S.getValue('GUEST_NET2_MAC')[self._number] +
-                      ',netdev=' + net2 + ',csum=off,gso=off,' +
-                      'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
-                      mq_vector_str,
-                     ]
+        # calculate the index of the first interface, i.e. check how many
+        # interfaces have been created for previous VMs, where the 1st NIC
+        # of the 1st VM has index 0
+        start_index = sum(S.getValue('GUEST_NICS_NR')[:self._number])
+
+        # setup requested number of interfaces
+        for nic in range(len(self._nics)):
+            index = start_index + nic
+            ifi = str(index)
+            net = 'net' + str(index + 1)
+
+            self._cmd += ['-chardev',
+                          'socket,id=char' + ifi +
+                          ',path=' + S.getValue('OVS_VAR_DIR') +
+                          'dpdkvhostuser' + ifi,
+                          '-netdev',
+                          'type=vhost-user,id=' + net +
+                          ',chardev=char' + ifi + ',vhostforce' + queue_str,
+                          '-device',
+                          'virtio-net-pci,mac=' +
+                          self._nics[nic]['mac'] +
+                          ',netdev=' + net + ',csum=off,gso=off,' +
+                          'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+                          mq_vector_str,
+                         ]
index afb519c..efeea49 100644 (file)
@@ -36,39 +36,33 @@ class QemuVirtioNet(IVnfQemu):
         tasks.run_task(['sudo', 'modprobe', 'vhost_net'], self._logger,
                        'Loading vhost_net module...', True)
 
-        # calculate indexes of guest devices (e.g. charx, dpdkvhostuserx)
-        i = self._number * 2
-        if1 = str(i)
-        if2 = str(i + 1)
+        # calculate the index of the first interface, i.e. check how many
+        # interfaces have been created for previous VMs, where the 1st NIC
+        # of the 1st VM has index 0
+        start_index = sum(S.getValue('GUEST_NICS_NR')[:self._number])
 
         # multi-queue values
-        if int(S.getValue('GUEST_NIC_QUEUES')):
-            queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES'))
+        if int(S.getValue('GUEST_NIC_QUEUES')[self._number]):
+            queue_str = ',queues={}'.format(
+                S.getValue('GUEST_NIC_QUEUES')[self._number])
             mq_vector_str = ',mq=on,vectors={}'.format(
-                int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2)
+                int(S.getValue('GUEST_NIC_QUEUES')[self._number]) * 2 + 2)
         else:
             queue_str, mq_vector_str = '', ''
 
-        self._cmd += ['-netdev',
-                      'tap,id=' + self._net1 + queue_str +
-                      ',script=no,downscript=no,' +
-                      'ifname=tap' + if1 + ',vhost=on',
-                      '-device',
-                      'virtio-net-pci,mac=' +
-                      S.getValue('GUEST_NET1_MAC')[self._number] +
-                      ',netdev=' + self._net1 +
-                      ',csum=off,gso=off,' +
-                      'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
-                      mq_vector_str,
-                      '-netdev',
-                      'tap,id=' + self._net2 + queue_str +
-                      ',script=no,downscript=no,' +
-                      'ifname=tap' + if2 + ',vhost=on',
-                      '-device',
-                      'virtio-net-pci,mac=' +
-                      S.getValue('GUEST_NET2_MAC')[self._number] +
-                      ',netdev=' + self._net2 +
-                      ',csum=off,gso=off,' +
-                      'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
-                      mq_vector_str,
-        ]
+        # setup requested number of interfaces
+        for nic in range(len(self._nics)):
+            index = start_index + nic
+            ifi = str(index)
+            self._cmd += ['-netdev', 'type=tap,id=' +
+                          self._nics[nic]['device'] + queue_str +
+                          ',script=no,downscript=no,' +
+                          'ifname=tap' + ifi + ',vhost=on',
+                          '-device',
+                          'virtio-net-pci,mac=' +
+                          self._nics[nic]['mac'] + ',netdev=' +
+                          self._nics[nic]['device'] +
+                          ',csum=off,gso=off,' +
+                          'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+                          mq_vector_str,
+                         ]
index 1410a0c..242fc50 100644 (file)
@@ -36,6 +36,8 @@ class IVnf(tasks.Process):
         this call (use ``start`` method instead).
         """
         self._number = IVnf._number_vnfs
+        self._logger.debug('Initializing VM number %s (with index %s)',
+                           self._number + 1, self._number)
         IVnf._number_vnfs = IVnf._number_vnfs + 1
         self._log_prefix = 'vnf_%d_cmd : ' % self._number
 
index 332725e..40ca970 100644 (file)
@@ -61,7 +61,9 @@ class OvsVanilla(IVSwitchOvs):
         for i in range(self._vport_id):
             tapx = 'tap' + str(i)
             tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']
-            if int(settings.getValue('GUEST_NIC_QUEUES')):
+            # let's assume that all VMs have NIC QUEUES enabled or disabled
+            # at the same time
+            if int(settings.getValue('GUEST_NIC_QUEUES')[0]):
                 tap_cmd_list += ['multi_queue']
             tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)
         self._vport_id = 0
@@ -112,13 +114,17 @@ class OvsVanilla(IVSwitchOvs):
         tap_name = 'tap' + str(self._vport_id)
         self._vport_id += 1
         tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tap_name, 'mode', 'tap']
-        if int(settings.getValue('GUEST_NIC_QUEUES')):
+        # let's assume that all VMs have NIC QUEUES enabled or disabled
+        # at the same time
+        if int(settings.getValue('GUEST_NIC_QUEUES')[0]):
             tap_cmd_list += ['multi_queue']
         tasks.run_task(tap_cmd_list, self._logger,
                        'Creating tap device...', False)
 
         tap_cmd_list = ['sudo', 'ip', 'tuntap', 'add', tap_name, 'mode', 'tap']
-        if int(settings.getValue('GUEST_NIC_QUEUES')):
+        # let's assume that all VMs have NIC QUEUES enabled or disabled
+        # at the same time
+        if int(settings.getValue('GUEST_NIC_QUEUES')[0]):
             tap_cmd_list += ['multi_queue']
         tasks.run_task(tap_cmd_list, self._logger,
                        'Creating tap device...', False)