X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=nfvbench%2Fcfg.default.yaml;h=d3a3fb9659e9270d1f8862604fc50ba4df23693c;hb=95f2491ed89ac99b0d8bd006b4a13cbeb1eb96ce;hp=eb5fa117f5f9f86238a30e41628b2e8a38586de1;hpb=7439c895c0ad2960712910ab6b72042884ce2a41;p=nfvbench.git
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index eb5fa11..d3a3fb9 100755
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -51,9 +51,9 @@ vm_image_file:
 # Otherwise, a new flavor will be created with attributes listed below.
 flavor_type: 'nfvbench.medium'
 
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
 flavor:
-  # Number of vCPUs for the flavor
+  # Number of vCPUs for the flavor, must be at least 2!
   vcpus: 2
   # Memory for the flavor in MB
   ram: 4096
@@ -68,6 +68,26 @@ flavor:
     "hw:cpu_policy": dedicated
     "hw:mem_page_size": large
 
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the multiqueue property (hw_vif_multiqueue_enabled='true') will be
+# added to the test VM image.
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface.
+# The maximum number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting a lower value than the vCPU count should also work. For example, if using 4 vCPUs and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
 # Name of the availability zone to use for the test VMs
 # Must be one of the zones listed by 'nova availability-zone-list'
 # availability_zone: 'nova'
@@ -147,6 +167,7 @@ traffic_generator:
     # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
     # `tg_gateway_ip_addrs__step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
     tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+    tg_gateway_ip_cidrs: ['1.1.0.0/24','2.2.0.0/24']
     tg_gateway_ip_addrs_step: 0.0.0.1
     # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
     # must correspond to the public IP on the left and right networks
@@ -284,9 +305,23 @@ restart: false
 # if empty defaults to the one specified in generator_profile.cores
 cores:
 
+# Cache size used for packet generation by the TRex field engine (FE).
+# More information on TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to the flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see the TRex manual, section 6.2.2 "Memory section configuration").
+# TRex uses 1 x 64B mbuf per pre-built cached packet. Assuming 1 pre-built cached packet per flow, a very large number of flows requires the configured mbuf_64 value to be sized accordingly.
+mbuf_64:
+
 # mbuffer ratio to use for TRex (see TRex documentation for more details)
 mbuf_factor: 0.2
 
+# A switch to disable hdrh
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
 # -----------------------------------------------------------------------------
 # These variables are not likely to be changed
@@ -398,9 +433,10 @@ idle_interfaces_per_vm: 0
 # If service_chain_shared_net is true, the options below will be ignored
 # and no idle interfaces will be added.
 idle_networks:
-  # Prefix for all idle networks
+  # Prefix for all idle networks; the final name will append the chain ID and idle index
+  # e.g. "nfvbench-idle-net.0.4" for chain 0, idle index 4
   name: 'nfvbench-idle-net'
-  # Prefix for all idle subnetworks
+  # Subnet name to use for all idle subnetworks
   subnet: 'nfvbench-idle-subnet'
   # CIDR to use for all idle networks (value should not matter)
   cidr: '192.169.1.0/24'
@@ -408,7 +444,7 @@ idle_networks:
   network_type: 'vlan'
   # segmentation ID to use for the network attached to the idle virtual interfaces
   # vlan: leave empty to let neutron pick the segmentation ID
-  # vxlan: must specify the VNI value to be used (cannot be empty)
+  # vxlan: must specify the starting VNI value to be used (cannot be empty)
   # Note that NFVbench will use as many consecutive segmentation IDs as needed.
   # For example, for 4 PVP chains and 8 idle
   # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
@@ -417,6 +453,63 @@ idle_networks:
   # physnet name to use for all idle interfaces
   physical_network:
 
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface.
+use_management_port: false
+
+# If a network with the given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+  name: 'nfvbench-management-net'
+  # Subnet name to use for the management subnetwork
+  subnet: 'nfvbench-management-subnet'
+  # CIDR to use for the management network
+  cidr: '192.168.0.0/24'
+  gateway: '192.168.0.254'
+  # Type of network associated with the management virtual interface (vlan or vxlan)
+  network_type: 'vlan'
+  # segmentation ID to use for the network attached to the management virtual interface
+  # vlan: leave empty to let neutron pick the segmentation ID
+  # vxlan: must specify the starting VNI value to be used (cannot be empty)
+  segmentation_id:
+  # physnet name to use for the management interface
+  physical_network:
+
+# Floating IP for the management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port.
+# One floating IP per loop VM will be used (floating IPs are often limited,
+# use them sparingly, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs; 10 PVVP chains will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with the given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from this network.
+# Otherwise set the name, subnet and CIDR information from your floating IP pool network.
+# Floating network used to set the floating IP on the management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+  name: 'nfvbench-floating-net'
+  # Subnet name to use for the floating subnetwork
+  subnet: 'nfvbench-floating-subnet'
+  # CIDR to use for the floating network
+  cidr: '192.168.0.0/24'
+  # Type of the floating network (vlan or vxlan)
+  network_type: 'vlan'
+  # segmentation ID to use for the floating network
+  # vlan: leave empty to let neutron pick the segmentation ID
+  # vxlan: must specify the starting VNI value to be used (cannot be empty)
+  segmentation_id:
+  # physnet name to use for the floating network
+  physical_network:
+
 # In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
 # handled in the middle network. The default (false) will use vswitch, while
 # SRIOV can be used by toggling below setting.
@@ -449,6 +542,40 @@ external_networks:
   left:
   right:
 
+# PVP with L3 router in the packet path only.
+# Only used when the l3_router option is true (see l3_router).
+# Prefix names of edge networks which will be used to send traffic via the traffic generator.
+# If a network with the given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network.
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+  left:
+    name: 'nfvbench-net2'
+    router_name: 'router_left'
+    subnet: 'nfvbench-subnet2'
+    cidr: '192.168.3.0/24'
+    gateway:
+    network_type:
+    segmentation_id:
+    physical_network:
+  right:
+    name: 'nfvbench-net3'
+    router_name: 'router_right'
+    subnet: 'nfvbench-subnet3'
+    cidr: '192.168.4.0/24'
+    gateway:
+    network_type:
+    segmentation_id:
+    physical_network:
+
 # Use 'true' to enable VXLAN encapsulation support and sent by the traffic generator
 # When this option enabled internal networks 'network type' parameter value should be 'vxlan'
 vxlan: false
@@ -509,6 +636,11 @@ traffic:
 # Can be overriden by --no-traffic
 no_traffic: false
 
+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
 # Test configuration
 
 # The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1.
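The management and floating IP options introduced above are typically set together in a small user configuration that overrides these defaults. Below is a minimal sketch; the floating network name 'public' and its subnet and CIDR are illustrative placeholders for an existing external network that provides the floating IP pool, and all other names are the defaults from this change:

use_management_port: true
management_network:
  name: 'nfvbench-management-net'
  subnet: 'nfvbench-management-subnet'
  cidr: '192.168.0.0/24'
  gateway: '192.168.0.254'
use_floating_ip: true
floating_network:
  name: 'public'              # assumed pre-existing external network (hypothetical name)
  subnet: 'public-subnet'     # illustrative
  cidr: '10.10.10.0/24'       # illustrative

One floating IP is consumed per loop VM, so as noted above a 10-chain PVVP run would need 20 floating IPs; keep this enabled only for debugging.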
@@ -617,3 +749,24 @@ user_label:
 # Can be overriden by --no-vswitch-access
 # Should be left to the default value (false)
 no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-stream
+# Should be left to the default value (false)
+no_latency_streams: false
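Taken together, the test VM and TRex tuning knobs added by this change could be combined in a user override along the lines of the sketch below. All values are illustrative: per the comments above, vif_multiqueue_size must stay within [1..min(8, vcpu_count)] and a negative cache_size means the flow count is used; mbuf_64 is left unset here, as in the default above:

vif_multiqueue_size: 2        # match the flavor vcpus, capped at 8
num_mbufs: 32768              # per CPU socket, for VPP forwarder scenarios with many interfaces/queues
cache_size: -1                # negative value: cache size follows the flow count
mbuf_64:                      # left unset, as in the default above
disable_hdrh: false           # keep hdrh latency reporting (requires TRex v2.58 or higher)
service_mode: false           # debug toggles left at their defaults
no_flow_stats: false
no_latency_stats: false
no_latency_streams: false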