# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
flavor:
- # Number of vCPUs for the flavor
+ # Number of vCPUs for the flavor, must be at least 2!
vcpus: 2
# Memory for the flavor in MB
ram: 4096
"hw:cpu_policy": dedicated
"hw:mem_page_size": large
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the property hw_vif_multiqueue_enabled='true' is added to the
+# test VM image, and OpenStack creates as many queues per interface as the
+# number of vCPUs configured for the VM.
+# By default there is only 1 queue per interface.
+# The maximum allowed number of queues per interface is 8, so the valid range
+# for this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs in use, up to a maximum of 8 queues.
+# Setting it to a lower value than the vCPU count should also work: for example,
+# with 4 vCPUs and vif_multiqueue_size set to 2, OpenStack will create 4 queues
+# per interface but the test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
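+# Example (illustrative values, not defaults): a 4-vCPU test VM driving one
+# queue per vCPU would combine the flavor and multiqueue settings as follows:
+# flavor:
+#   vcpus: 4
+# vif_multiqueue_size: 4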
+
+# Increase number of buffers allocated for VPP VM forwarder. May be needed in scenarios with large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
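+# Example (illustrative value): a large PVVP configuration with many interfaces
+# and RSS queues might double the default buffer count:
+# num_mbufs: 32768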
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
# `tg_gateway_ip_addrs`: base IPs of the traffic generator gateways (left and right);
#   chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
# `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+tg_gateway_ip_cidrs: ['1.1.0.0/24', '2.2.0.0/24']
tg_gateway_ip_addrs_step: 0.0.0.1
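+# Example: with the base addresses and step above, a 2-chain run uses
+# 1.1.0.100/2.2.0.100 for chain 0 and 1.1.0.101/2.2.0.101 for chain 1.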
# `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
# must correspond to the public IP on the left and right networks
# Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
# for all VxLAN tunneled traffic
vtep_vlan:
- # VxLAN only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
+ # VxLAN and MPLS only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
src_vteps:
# VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
dst_vtep:
-
+ # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+ # This parameter is required to resolve the first next-hop MAC address when that next hop is not the final dst_vtep.
+ # This parameter is mandatory for MPLS only.
+ vtep_gateway_ips:
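+ # Example (a sketch with hypothetical addresses, assuming one gateway IP per port):
+ # vtep_vlan: 100
+ # src_vteps: ['10.1.1.230', '10.1.1.231']
+ # dst_vtep: '10.1.1.240'
+ # vtep_gateway_ips: ['10.1.1.1', '10.1.1.1']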
# L2 ADDRESSING OF UDP PACKETS
# Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
# Leave empty for PVP, PVVP, EXT with ARP
# if empty defaults to the one specified in generator_profile.cores
cores:
+# Cache size used in packet generation by the TRex field engine (FE).
+# More information for TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to flow count value
+cache_size: 0
+# The cache size is effectively limited by the number of 64B mbufs configured in the TRex
+# platform configuration (see the TRex manual, section 6.2.2. "Memory section configuration").
+# TRex uses one 64B mbuf per pre-built cached packet; assuming one pre-built cached packet per flow,
+# a very large number of flows requires mbuf_64 to be set accordingly.
+mbuf_64:
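+# Example (illustrative values): for a run with 10,000 flows, let the cache grow to
+# the flow count and size the 64B mbuf pool to cover one cached packet per flow:
+# cache_size: -1
+# mbuf_64: 10000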
+
# mbuf ratio to use for TRex (see TRex documentation for more details)
mbuf_factor: 0.2
+# A switch to disable hdrh
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
# segmentation_id: 2001
# physical_network: phys_sriov1
#
-# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN):
+# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN, MPLS):
# - the segmentation_id field if provided must be a list of values (as many as chains)
# - segmentation_id auto-indexing:
# the segmentation_id field can also be a single value that represents the base value from which
# the segmentation ID for each chain is derived by adding the chain index (see the example below)
# - the physical_network can be a single name (all VFs to be allocated on same physnet)
# or a list of physnet names to use different PFs
#
-# Example of 2-chain configuration:
-# internal_networks:
-# left:
-# segmentation_id: [2000, 2001]
-# physical_network: phys_sriov0
-# right:
-# segmentation_id: [2010, 2011]
-# physical_network: phys_sriov1
+# Example of 2-chain VLAN configuration:
+# internal_networks:
+# left:
+# segmentation_id: [2000, 2001]
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: [2010, 2011]
+# physical_network: phys_sriov1
+# Equivalent to (using auto-indexing):
+# internal_networks:
+# left:
+# segmentation_id: 2000
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: 2010
+# physical_network: phys_sriov1
#
-# Equivalent to (using auto-indexing):
-# internal_networks:
-# left:
-# segmentation_id: 2000
-# physical_network: phys_sriov0
-# right:
-# segmentation_id: 2010
-# physical_network: phys_sriov1
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true);
+# this parameter doesn't support auto-indexing because this is not a typical scenario
+# a list of values in the range 256-1048575 is expected, one value per chain
+#
+# In the configuration example below, 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
+# Example of 2-chain MPLS configuration:
+# internal_networks:
+# left:
+# network_type: mpls
+# segmentation_id: [2000, 2001]
+# mpls_transport_labels: [10000, 10000]
+# physical_network: phys_sriov0
+# right:
+# network_type: mpls
+# segmentation_id: [2010, 2011]
+# mpls_transport_labels: [11000, 11000]
+# physical_network: phys_sriov1
+
internal_networks:
left:
name: 'nfvbench-lnet'
subnet: 'nfvbench-lsubnet'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
right:
name: 'nfvbench-rnet'
subnet: 'nfvbench-rsubnet'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
middle:
name: 'nfvbench-mnet'
subnet: 'nfvbench-msubnet'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
# IDLE INTERFACES: PVP, PVVP and non shared net only.
# By default each test VM will have 2 virtual interfaces for looping traffic.
# If service_chain_shared_net is true, the options below will be ignored
# and no idle interfaces will be added.
idle_networks:
- # Prefix for all idle networks
+ # Prefix for all idle networks; the chain ID and idle index are appended to form the final name,
+ # e.g. "nfvbench-idle-net.0.4" for chain 0, idle index 4
name: 'nfvbench-idle-net'
- # Prefix for all idle subnetworks
+ # Subnet name to use for all idle subnetworks
subnet: 'nfvbench-idle-subnet'
# CIDR to use for all idle networks (value should not matter)
cidr: '192.169.1.0/24'
network_type: 'vlan'
# segmentation ID to use for the network attached to the idle virtual interfaces
# vlan: leave empty to let neutron pick the segmentation ID
- # vxlan: must specify the VNI value to be used (cannot be empty)
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
# Note that NFVbench will use as many consecutive segmentation IDs as needed.
# For example, for 4 PVP chains and 8 idle
# interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
segmentation_id:
# physnet name to use for all idle interfaces
physical_network:
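+# Example (illustrative): with 4 PVP chains, 8 idle interfaces per VM and vxlan,
+# a starting VNI of 5000 makes NFVbench consume VNIs 5000..5031:
+# idle_networks:
+#   network_type: 'vxlan'
+#   segmentation_id: 5000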
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface is added at
+# VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface.
+use_management_port: false
+
+# If a network with given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated with the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the management interface
+ physical_network:
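+# Example (illustrative, hypothetical names): enable the management interface and
+# reuse an existing management network instead of creating a new one:
+# use_management_port: true
+# management_network:
+#   name: 'existing-mgmt-net'
+#   subnet: 'existing-mgmt-subnet'
+#   cidr: '10.0.0.0/24'
+#   gateway: '10.0.0.254'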
+
+# Floating IP for management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port.
+# One floating IP per loop VM will be used (floating IPs are often limited;
+# use them in limited contexts, mainly for debugging). If there are 10 PVP chains, this will
+# require 10 floating IPs; 10 PVVP chains will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with the given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from that network.
+# Otherwise set the name, subnet and CIDR information of your floating IP pool network.
+# Floating network used to set floating IP on management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of network to use for the floating network (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the floating network
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the floating network
+ physical_network:
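+# Example (illustrative, hypothetical name): draw floating IPs from an existing
+# external network so that each loop VM can be reached over SSH:
+# use_floating_ip: true
+# floating_network:
+#   name: 'public'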
+
# In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
# handled in the middle network. The default (false) is to use the vswitch, while
# SRIOV can be used by toggling the setting below.
# external_networks:
# left: ['ext-lnet', 'ext-lnet2']
# right: ['ext-rnet', 'ext-rnet2']
-#
+#
external_networks:
left:
right:
+# PVP with L3 router in the packet path only.
+# Only use when l3_router option is True (see l3_router)
+# Prefix names of the edge networks that will be used to send traffic via the traffic generator.
+# If a network with given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+ left:
+ name: 'nfvbench-net2'
+ router_name: 'router_left'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+ right:
+ name: 'nfvbench-net3'
+ router_name: 'router_right'
+ subnet: 'nfvbench-subnet3'
+ cidr: '192.168.4.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+
# Use 'true' to enable VXLAN encapsulation of the traffic sent by the traffic generator.
# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'.
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false,
+# and vice versa.
vxlan: false
-
+# Use 'true' to enable MPLS encapsulation of the traffic sent by the traffic generator.
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'.
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be
+# set to 'false', and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be
+# set to 'true' because these features are not supported at the moment (when supported, they will
+# require special NIC hardware). Only a 2-label stack is supported at the moment, where one label is
+# the transport label and the other is the VPN label; for more details please refer to
+# 'mpls_transport_labels' and 'segmentation_id' in the networks configuration.
+mpls: false
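+# Example (illustrative): minimal combination of options for an MPLS run,
+# following the constraints described above (the internal networks must also
+# use network_type: mpls, see the 2-chain MPLS example earlier):
+# mpls: true
+# vxlan: false
+# vlan_tagging: false
+# no_flow_stats: true
+# no_latency_stats: true
+# no_latency_streams: true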
# Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
# needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook
# directly to a NIC).
# By default it is set to true (which is the nominal use case with TOR and trunk mode to TRex ports)
-# If VxLAN is enabled, this option should be set to false (vlan tagging for encapsulated packets
+# If VxLAN or MPLS encapsulation is enabled, this option should be set to false (vlan tagging for encapsulated packets
# is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
vlan_tagging: true
# (see mac_addrs_left and mac_addrs_right)
no_arp: false
+# Loop VM (VPP forwarder) can use ARP to discover the next-hop MAC address.
+# False (default): do not send ARP requests; use the statically configured device MACs instead
+# (TRex gratuitous ARPs are not interpreted by VPP).
+# True: ARP requests are sent to find out next-hop MAC addresses (for instance an SDN gateway).
+loop_vm_arp: false
+
# Traffic Profiles
# You can add here more profiles as needed
# `l2frame_size` can be specified as any non-zero integer value to represent the size in bytes
# Can be overridden by --no-traffic
no_traffic: false
+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack
+# Neutron router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
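+# Example (illustrative): route PVP traffic through a Neutron router using the
+# edge networks defined above:
+# l3_router: true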
+
# Test configuration
# The rate in pps for traffic going in the reverse direction in case of unidirectional flow. Defaults to 1.
# Can be overridden by --no-vswitch-access
# Should be left to the default value (false)
no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-streams
+# Should be left to the default value (false)
+no_latency_streams: false