# test VM will only use the first 2 queues.
vif_multiqueue_size: 1
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios
+# with a large number of interfaces and worker threads, or many physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
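+# Example (illustrative value, not a recommendation): a setup with many worker threads and
+# RSS queues might double the default per-socket pool:
+# num_mbufs: 32768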
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
# chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
# `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
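+# CIDRs of the left and right traffic generator gateway networks (corresponding to the tg_gateway_ip_addrs above)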
+tg_gateway_ip_cidrs: ['1.1.0.0/24', '2.2.0.0/24']
tg_gateway_ip_addrs_step: 0.0.0.1
# `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
# must correspond to the public IP on the left and right networks
# if empty defaults to the one specified in generator_profile.cores
cores:
+# Cache size used for packet generation by the TRex field engine (FE).
+# For more information on TRex performance, see:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to the flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform
+# configuration (see the TRex manual, section 6.2.2 "Memory section configuration").
+# TRex uses one 64B mbuf per pre-built cached packet. Assuming one pre-built cached packet per flow,
+# a very large number of flows requires the configured mbuf_64 value to be increased accordingly.
+mbuf_64:
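+# Example (illustrative values only): cache one pre-built packet per flow for a 100,000-flow run
+# by letting cache_size follow the flow count, and size mbuf_64 to at least the flow count:
+# cache_size: -1
+# mbuf_64: 100000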
+
# mbuffer ratio to use for TRex (see TRex documentation for more details)
mbuf_factor: 0.2
+# A switch to disable hdrh (HDR histogram latency reporting)
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
# physnet name to use for all idle interfaces
physical_network:
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface is added at VM creation time;
+# this interface will be used for VM management over SSH.
+# This is helpful for debugging (checking the forwarder config, capturing traffic, ...)
+# or for emulating a VNF with a management interface.
+use_management_port: false
+
+# If a network with the given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated with the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the management network
+ physical_network:
+
+# Floating IP for management interface
+# If use_floating_ip is true, floating IP will be set on management interface port
+# One floating IP per loop VM will be used (floating IPs are often a limited resource,
+# so use this option mainly for debugging). For example, 10 PVP chains will require 10
+# floating IPs, and 10 PVVP chains will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with the given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from that network;
+# otherwise set the name, subnet and CIDR information of your floating IP pool network.
+# The floating network is used to set a floating IP on the management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of the floating IP network (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the floating IP network
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the floating IP network
+ physical_network:
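+
+# Example (illustrative, commented out): give each loop VM an SSH-reachable management interface
+# and allocate its floating IP from the management network itself (same name as management_network):
+# use_management_port: true
+# use_floating_ip: true
+# floating_network:
+#     name: 'nfvbench-management-net'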
+
# In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
# handled in the middle network. The default (false) will use vswitch, while
# SRIOV can be used by toggling below setting.
left:
right:
+# Used only for PVP with an L3 router in the packet path.
+# Only used when the l3_router option is True (see l3_router)
+# Prefix names of the edge networks which will be used to send traffic via the traffic generator.
+# If a network with the given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+ left:
+ name: 'nfvbench-net2'
+ router_name: 'router_left'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+ right:
+ name: 'nfvbench-net3'
+ router_name: 'router_right'
+ subnet: 'nfvbench-subnet3'
+ cidr: '192.168.4.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
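+
+# Example (illustrative VLAN values): to enforce specific VLAN IDs instead of letting Neutron
+# pick them, set a unique segmentation_id under each edge network above, e.g.:
+#     segmentation_id: 2000   # left
+#     segmentation_id: 2001   # right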
+
# Use 'true' to enable VXLAN encapsulation support for traffic sent by the traffic generator
# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
vxlan: false
# Can be overridden by --no-traffic
no_traffic: false
+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
# Test configuration
# The rate in pps for traffic going in the reverse direction in case of unidirectional flow. Defaults to 1.
# Can be overridden by --no-vswitch-access
# Should be left to the default value (false)
no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-stream
+# Should be left to the default value (false)
+no_latency_streams: false