Adds support for multi-queue using the following config.
* VNF = QemuDpdkVhostUser
* VSWITCH = OvsDpdkVhost
* Guest Loopback as testpmd
Adds CPU mask, nbcore, rxq, and txq options for testpmd.
Adds option for guest NIC multi-queue.
Adds option to enable multi-queue on dpdkvhostuser and dpdk ports.
JIRA: VSPERF-309
Change-Id: I5296fc18b430eace598d8c51620fc27a6c46a65e
Signed-off-by: Christian Trautman <ctrautma@redhat.com>
# Note: VSPERF will automatically detect which type of DPDK configuration should
# be used.
+# To enable multi-queue, set the parameter below to the desired number of queues.
+# 0 = disabled
+VSWITCH_MULTI_QUEUES = 0
+
# parameters passed to ovs-vswitchd in case that OvsVanilla is selected
VSWITCHD_VANILLA_ARGS = []
# For 2 VNFs you may use [(4,5), (6, 7)]
GUEST_CORE_BINDING = [(6, 7), (9, 10)]
+# Number of queues per NIC inside the guest for multi-queue configuration;
+# requires switch multi-queue to be enabled. Set to 0 to disable.
+GUEST_NIC_QUEUES = 0
+
GUEST_START_TIMEOUT = 120
GUEST_OVS_DPDK_DIR = '/root/ovs_dpdk'
OVS_DPDK_SHARE = '/mnt/ovs_dpdk_share'
+# Set the CPU mask for testpmd loopback. To bind to specific guest CPUs use -l
+# GUEST_TESTPMD_CPU_MASK = '-l 0,1'
+GUEST_TESTPMD_CPU_MASK = '-c 0x3'
+
+# Testpmd multi-core configuration. Leave at 0 to disable. Will not take effect
+# unless GUEST_NIC_QUEUES is > 0. For bidirectional traffic NB_CORES must be
+# equal to (RXQ + TXQ).
+GUEST_TESTPMD_NB_CORES = 0
+GUEST_TESTPMD_TXQ = 0
+GUEST_TESTPMD_RXQ = 0
+
# IP addresses to use for Vanilla OVS PVP testing
# Consider using RFC 2544/3330 recommended IP addresses for benchmark testing.
# Network: 198.18.0.0/15
will not be forwarded by the VM and testcases with PVP and PVVP deployments
will fail. The guest loopback application is set to 'testpmd' by default.
+Multi-Queue Configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+VSPerf currently supports multi-queue with the following limitations:
+
+ 1. Execution of PVP/PVVP tests requires testpmd as the loopback if multi-queue
+ is enabled at the guest.
+
+ 2. Requires QemuDpdkVhostUser as the VNF.
+
+ 3. Requires the switch to be set to OvsDpdkVhost.
+
+ 4. Requires QEMU 2.5 or greater and any OVS version higher than 2.5. The
+ default upstream package versions installed by VSPerf satisfy this
+ requirement.
+
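+For reference, limitations 1-3 correspond to configuration settings along the
+following lines (shown here for illustration; the exact settings may be given
+in a custom conf file or as test parameters for your deployment):
+
+ .. code-block:: console
+
+ VSWITCH = 'OvsDpdkVhost'
+ VNF = 'QemuDpdkVhostUser'
+ GUEST_LOOPBACK = ['testpmd']
+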
+To enable multi-queue on the switch, modify the ``02_vswitch.conf`` file.
+
+ .. code-block:: console
+
+ VSWITCH_MULTI_QUEUES = 2
+
+**NOTE:** You should consider using the switch affinity to set a PMD CPU mask
+that can optimize your performance. If applicable, check the NUMA node of the
+NIC in use via /sys/class/net/<eth_name>/device/numa_node and set an
+appropriate mask to create PMD threads on the same NUMA node.
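+
+As a rough sketch, assuming a hypothetical interface ``eth1`` whose NUMA node
+hosts cores 4 and 5, the check and one possible way of pinning the PMD threads
+could look like this (the mask value is purely illustrative):
+
+ .. code-block:: console
+
+ $ cat /sys/class/net/eth1/device/numa_node
+ 0
+ $ ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x30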
+
+When multi-queue is enabled, each dpdk or dpdkvhostuser port created on the
+switch is configured with the requested number of queues.
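+
+With ``VSWITCH_MULTI_QUEUES = 2``, for example, the resulting port configuration
+is roughly equivalent to running the following for each port:
+
+ .. code-block:: console
+
+ $ ovs-vsctl set Interface dpdk0 options:n_rxq=2
+ $ ovs-vsctl set Interface dpdkvhostuser0 options:n_rxq=2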
+
+To enable multi-queue on the guest, modify the ``04_vnf.conf`` file.
+
+ .. code-block:: console
+
+ GUEST_NIC_QUEUES = 2
+
+Enabling multi-queue at the guest will add multiple queues to each NIC port when
+QEMU launches the guest.
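+
+With ``GUEST_NIC_QUEUES = 2``, for instance, each vhost-user interface is passed
+to QEMU with parameters along these lines (unrelated options omitted):
+
+ .. code-block:: console
+
+ -netdev type=vhost-user,id=net1,chardev=char0,vhostforce,queues=2
+ -device virtio-net-pci,netdev=net1,mq=on,vectors=6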
+
+Testpmd should be configured to take advantage of multi-queue on the guest. This
+can be done by modifying the ``04_vnf.conf`` file.
+
+ .. code-block:: console
+
+ GUEST_TESTPMD_CPU_MASK = '-l 0,1,2,3,4'
+
+ GUEST_TESTPMD_NB_CORES = 4
+ GUEST_TESTPMD_TXQ = 2
+ GUEST_TESTPMD_RXQ = 2
+
+**NOTE:** The guest SMP cores must be configured to allow testpmd to use the
+optimal number of cores and take advantage of the multiple guest queues.
+
+**NOTE:** For optimal performance, guest SMP cores should be on the same NUMA
+node as the NIC in use, if possible/applicable. Testpmd should be assigned at
+least (nb_cores + 1) total cores with the CPU mask.
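+
+With the example values above, the testpmd loopback inside the guest would be
+started with a command roughly like the following (defaults such as socket
+memory shown only for illustration):
+
+ .. code-block:: console
+
+ ./testpmd -l 0,1,2,3,4 -n 4 --socket-mem 512 -- --burst=64 -i \
+ --txqflags=0xf00 --nb-cores=4 --txq=2 --rxq=2 --disable-hw-vlan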
+
Executing Packet Forwarding tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'/DPDK/app/test-pmd')
self.execute_and_wait('make clean')
self.execute_and_wait('make')
- self.execute_and_wait('./testpmd -c 0x3 -n 4 --socket-mem 512 --'
- ' --burst=64 -i --txqflags=0xf00 ' +
- '--disable-hw-vlan', 60, "Done")
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ self.execute_and_wait(
+ './testpmd {} -n 4 --socket-mem 512 --'.format(
+ S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+ ' --burst=64 -i --txqflags=0xf00 ' +
+ '--nb-cores={} --txq={} --rxq={} '.format(
+ S.getValue('GUEST_TESTPMD_NB_CORES'),
+ S.getValue('GUEST_TESTPMD_TXQ'),
+ S.getValue('GUEST_TESTPMD_RXQ')) +
+ '--disable-hw-vlan', 60, "Done")
+ else:
+ self.execute_and_wait(
+ './testpmd {} -n 4 --socket-mem 512 --'.format(
+ S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+ ' --burst=64 -i --txqflags=0xf00 ' +
+ '--disable-hw-vlan', 60, "Done")
self.execute('set fwd ' + self._testpmd_fwd_mode, 1)
self.execute_and_wait('start', 20,
'TX RS bit threshold=.+ - TXQ flags=0xf00')
net1 = 'net' + str(i + 1)
net2 = 'net' + str(i + 2)
+ # multi-queue values
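+ # each queue pair needs one RX and one TX MSI-X vector, plus two more
+ # for config and control, hence vectors = 2 * queues + 2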
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES'))
+ mq_vector_str = ',mq=on,vectors={}'.format(
+ int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2)
+ else:
+ queue_str, mq_vector_str = '', ''
+
self._cmd += ['-chardev',
'socket,id=char' + if1 +
',path=' + S.getValue('OVS_VAR_DIR') +
'dpdkvhostuser' + if2,
'-netdev',
'type=vhost-user,id=' + net1 +
- ',chardev=char' + if1 + ',vhostforce',
+ ',chardev=char' + if1 + ',vhostforce' + queue_str,
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET1_MAC')[self._number] +
',netdev=' + net1 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
'-netdev',
'type=vhost-user,id=' + net2 +
- ',chardev=char' + if2 + ',vhostforce',
+ ',chardev=char' + if2 + ',vhostforce' + queue_str,
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET2_MAC')[self._number] +
',netdev=' + net2 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
]
-
dpdk_count = self._get_port_count('type=dpdk')
port_name = 'dpdk' + str(dpdk_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdk']
+ # multi-queue enable
+ if int(settings.getValue('VSWITCH_MULTI_QUEUES')):
+ params += ['options:n_rxq={}'.format(
+ settings.getValue('VSWITCH_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
-
return (port_name, of_port)
def add_vport(self, switch_name):
vhost_count = self._get_port_count('type=dpdkvhostuser')
port_name = 'dpdkvhostuser' + str(vhost_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdkvhostuser']
-
+ # multi-queue enable
+ if int(settings.getValue('VSWITCH_MULTI_QUEUES')):
+ params += ['options:n_rxq={}'.format(
+ settings.getValue('VSWITCH_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
return (port_name, of_port)