X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=nfvbench%2Fcfg.default.yaml;h=551b1c45da09f335a7865994661ff39adceb112d;hb=24314713446b6411cedce4329ab5ebfd6da678a2;hp=c90709f8c2d3bb9e952906cc291bb1a9b7c1da7a;hpb=f5d7345437161218d487e3771a96df29928f6c4f;p=nfvbench.git

diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index c90709f..551b1c4 100755
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -25,6 +25,11 @@
 # The only case where this field can be empty is when measuring a system that does not run
 # OpenStack or when OpenStack APIs are not accessible or OpenStack API use is not
 # desirable. In that case the EXT service chain must be used.
+#
+# If openrc is not admin, some parameters are mandatory and must be filled with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
 openrc_file:

 # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
@@ -46,9 +51,9 @@ vm_image_file:
 # Otherwise, a new flavor will be created with attributes listed below.
 flavor_type: 'nfvbench.medium'

-# Custom flavor attributes
+# Custom flavor attributes for the test VM
 flavor:
-  # Number of vCPUs for the flavor
+  # Number of vCPUs for the flavor, must be at least 2!
   vcpus: 2
   # Memory for the flavor in MB
   ram: 4096
@@ -63,14 +68,38 @@ flavor:
     "hw:cpu_policy": dedicated
     "hw:mem_page_size": large

+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the property to enable multiqueue will be added to the test VM image
+# (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface.
+# The maximum allowed number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting it to a lower value than the vCPU count should also work. For example, if using 4 vCPUs and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
 # Name of the availability zone to use for the test VMs
 # Must be one of the zones listed by 'nova availability-zone-list'
 # availability_zone: 'nova'
+# If openrc is not admin, set a valid value.
 availability_zone:
 # To force placement on a given hypervisor, set the name here
 # (if multiple names are provided, the first will be used)
 # Leave empty to let openstack pick the hypervisor
 compute_nodes:
+# If openrc is not admin, set a valid hypervisor hostname.
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:

 # Type of service chain to run, possible options are PVP, PVVP and EXT
 # PVP - port to VM to port
@@ -138,6 +167,7 @@ traffic_generator:
 #    chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
 #    `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. Default is 0.0.0.1.
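 #    Worked example (hypothetical values): with 3 chains and the settings below
 #    (tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100'], step 0.0.0.1), the
 #    gateways used would be 1.1.0.100-1.1.0.102 on port 0 and
 #    2.2.0.100-2.2.0.102 on port 1.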
 tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+tg_gateway_ip_cidrs: ['1.1.0.0/24','2.2.0.0/24']
 tg_gateway_ip_addrs_step: 0.0.0.1
 # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
 #                     must correspond to the public IP on the left and right networks
@@ -157,13 +187,6 @@ traffic_generator:
 #   Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
 #   for all VxLAN tunneled traffic
 vtep_vlan:
-# VxLAN only: VNI range for VXLAN encapsulation [start_vni, end_vni] [5000, 6000]
-# VNI can have a value from range 5000-16777216
-# For PVP, VNIs are allocated consecutively - 2 per each chain
-# Chain 1: 5000, 5001; Chain 2: 5002, 5003; Chain X: 5000+x, 5000+x+1
-# For PVVP scenario VNIs allocated consecutively - 3 per each chain
-# Chain 1: 5000, 5001, 5002; Chain 2: 5003, 5004, 5005; Chain X: 5000+x, 5000+x+1, 5000+x+1
-vnis:
 # VxLAN only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
 src_vteps:
 # VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
@@ -199,7 +222,7 @@ traffic_generator:
 #
 # Generator profiles are listed in the following format:
 # `name`: Traffic generator profile name (use a unique name, no space or special character)
-#         DFo not change this field
+#         Do not change this field
 # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
 #         Do not change this field
 # `ip`: IP address of the traffic generator.
@@ -212,6 +235,13 @@ traffic_generator:
 #                  software mode, therefore the performance of TRex will be significantly
 #                  lower. ONLY applies to trex-local.
 #                  Recommended to leave the default value (false)
+# `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limits the amount
+#                 of packet memory used (passed to DPDK as the -m argument).
+#                 ONLY applies to trex-local.
+# `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+#                 ONLY applies to trex-local.
+# `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+#                 ONLY applies to trex-local.
 # `interfaces`: Configuration of traffic generator interfaces.
 # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
 # `interfaces.switch_port`: Leave empty (deprecated)
@@ -225,12 +255,33 @@ traffic_generator:
 #               Do not use unless you want to override the speed discovered by the
 #               traffic generator. Expected format: 10Gbps
 #
+# `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA node.
+#             See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+#             for more details.
+# `platform.master_thread_id`: Hardware thread_id for the control thread. (Valid value is mandatory if platform property is set)
+# `platform.latency_thread_id`: Hardware thread_id for the RX thread. (Valid value is mandatory if platform property is set)
+# `platform.dual_if`: Section that defines info for the interface pairs (according to the order in the "interfaces" list).
+#                     Each section, starting with "- socket", defines info for a different interface pair. (Valid value is mandatory if platform property is set)
+# `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if platform property is set)
+# `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if platform property is set)
+#                             Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+# Example of values:
+# platform:
+#     master_thread_id: 0
+#     latency_thread_id: 2
+#     dual_if:
+#         - socket: 0
+#           threads: [1]
+#
 generator_profile:
     - name: trex-local
       tool: TRex
       ip: 127.0.0.1
       cores: 4
       software_mode: false
+      limit_memory: 1024
+      zmq_pub_port: 4500
+      zmq_rpc_port: 4501
       interfaces:
         - port: 0
           pci:
@@ -239,14 +290,38 @@ traffic_generator:
           pci:
           switch_port:
           intf_speed:
+      platform:
+        master_thread_id:
+        latency_thread_id:
+        dual_if:
+          - socket:
+            threads:
+
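+# A minimal sketch (hypothetical port numbers) for running a second TRex
+# instance side by side with the default one: the ZMQ ports of the two
+# instances must not clash, so the second profile could override them, e.g.
+#       zmq_pub_port: 4510
+#       zmq_rpc_port: 4511
+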
+# Use 'true' to force a restart of the local TRex server before the next run
+# The TRex local server will be restarted even if the restart property is false when the generator config changes between runs
+restart: false

 # Simpler override for trex core count and mbuf multiplier factor
 # if empty defaults to the one specified in generator_profile.cores
 cores:

+# Add cache size in packet generation for the TRex field engine (FE).
+# More information on TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to the flow count value
+cache_size:
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see TRex manual 6.2.2. Memory section configuration).
+# TRex will use one 64B mbuf per pre-built cached packet; assuming one pre-built cached packet per flow, for very large flow counts the configured mbuf_64 value must be set accordingly.
+mbuf_64:
+
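+# A worked example (hypothetical values): with flow_count: 100000 and
+# cache_size: -1 (cache_size set to the flow count), TRex pre-builds one
+# cached packet per flow, so mbuf_64 should be set to at least 100000.
+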
"nfvbench-idle-net.0.4" chain 0 idle index 4 + name: 'nfvbench-idle-net' + # Subnet name to use for all idle subnetworks + subnet: 'nfvbench-idle-subnet' + # CIDR to use for all idle networks (value should not matter) + cidr: '192.169.1.0/24' + # Type of network associated to the idle virtual interfaces (vlan or vxlan) + network_type: 'vlan' + # segmentation ID to use for the network attached to the idle virtual interfaces + # vlan: leave empty to let neutron pick the segmentation ID + # vxlan: must specify the starting VNI value to be used (cannot be empty) + # Note that NFVbench will use as many consecutive segmentation IDs as needed. + # For example, for 4 PVP chains and 8 idle + # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID + # starting from the value provided. + segmentation_id: + # physnet name to use for all idle interfaces + physical_network: + +# MANAGEMENT INTERFACE +# By default each test VM will have 2 virtual interfaces for looping traffic. +# If use_management_port is true, additional virtual interface can be +# added at VM creation time, this interface will be used for VM management over SSH. +# This will be helpful for debug (forwarder config, capture traffic...) +# or to emulate VNF with management interface +use_management_port: false + +# If a network with given name already exists it will be reused. +# Otherwise a new network is created for management interface. +# If use_management_port is false, the options below will be ignored +# and no management interface will be added. +management_network: + name: 'nfvbench-management-net' + # Subnet name to use for management subnetwork + subnet: 'nfvbench-management-subnet' + # CIDR to use for management network + cidr: '192.168.0.0/24' + gateway: '192.168.0.254' + # Type of network associated to the management virtual interface (vlan or vxlan) + network_type: 'vlan' + # segmentation ID to use for the network attached to the management virtual interface + # vlan: leave empty to let neutron pick the segmentation ID + # vxlan: must specify the starting VNI value to be used (cannot be empty) + segmentation_id: + # physnet name to use for all idle interfaces + physical_network: + +# Floating IP for management interface +# If use_floating_ip is true, floating IP will be set on management interface port +# One floating IP by loop VM will be used (floating ips are often limited, +# use them on limited context mainly for debug). If there are 10 PVP chains, this will require 10 +# floating IPs. If 10 PVVP chains, it will require 20 floating IPs +use_floating_ip: false + +# If a network with given name already exists it will be reused. +# Set same name as management_network if you want to use a floating IP from this network +# Otherwise set name, subnet and CIDR information from your floating IP pool network +# Floating network used to set floating IP on management port. +# Only 1 floating network will be used for all VMs and chains (shared network). +# If use_floating_ip is false, the options below will be ignored +# and no floating IP will be added. 
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface.
+use_management_port: false
+
+# If a network with given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+    name: 'nfvbench-management-net'
+    # Subnet name to use for management subnetwork
+    subnet: 'nfvbench-management-subnet'
+    # CIDR to use for management network
+    cidr: '192.168.0.0/24'
+    gateway: '192.168.0.254'
+    # Type of network associated to the management virtual interface (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the management virtual interface
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the management interface
+    physical_network:
+
+# Floating IP for management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port.
+# One floating IP per loop VM will be used (floating IPs are often limited;
+# use them in limited contexts, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs. If 10 PVVP chains, it will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from this network.
+# Otherwise set name, subnet and CIDR information from your floating IP pool network.
+# Floating network used to set floating IP on management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+    name: 'nfvbench-floating-net'
+    # Subnet name to use for floating subnetwork
+    subnet: 'nfvbench-floating-subnet'
+    # CIDR to use for floating network
+    cidr: '192.168.0.0/24'
+    # Type of the floating network (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the floating network
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the floating network
+    physical_network:
+
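+# A minimal sketch for a debug session (hypothetical, reusing the default
+# management_network and floating_network above) so each loop VM can be
+# reached over SSH through a floating IP:
+# use_management_port: true
+# use_floating_ip: true
+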
 # In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
 # handled in the middle network. The default (false) will use vswitch, while
 # SRIOV can be used by toggling below setting.
 use_sriov_middle_net: false

-# EXT chain only. Prefix names of edge networks which will be used to send traffic via traffic generator.
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via the traffic generator.
 #
 # If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
 #
 # If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
-# An index will be appended to each network name to form the final name:
+# left and right can take either a string prefix or a list of arbitrary network names.
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+#   left:  'ext-lnet'
+#   right: 'ext-rnet'
 #   ext-lnet0 ext-rnet0 for chain #0
 #   ext-lnet1 ext-rnet1 for chain #1
 #   etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains.
+# Example:
+# external_networks:
+#   left:  ['ext-lnet', 'ext-lnet2']
+#   right: ['ext-rnet', 'ext-rnet2']
+#
 external_networks:
-    left: 'ext-lnet'
-    right: 'ext-rnet'
+    left:
+    right:
+
+# PVP with L3 router in the packet path only.
+# Only use when the l3_router option is True (see l3_router)
+# Prefix names of edge networks which will be used to send traffic via the traffic generator.
+# If a network with given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+    left:
+        name: 'nfvbench-net2'
+        router_name: 'router_left'
+        subnet: 'nfvbench-subnet2'
+        cidr: '192.168.3.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:
+    right:
+        name: 'nfvbench-net3'
+        router_name: 'router_right'
+        subnet: 'nfvbench-subnet3'
+        cidr: '192.168.4.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:

 # Use 'true' to enable VXLAN encapsulation support for traffic sent by the traffic generator
 # When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
 vxlan: false
@@ -376,7 +589,7 @@
 # is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
 vlan_tagging: true

-# Used only in the case of EXT chain and no openstack to specify the VLAN IDs to use.
+# Used only in the case of EXT chain with no openstack or no admin access, to specify the VLAN IDs to use.
 # This property is ignored when OpenStack is used or in the case of l2-loopback.
 # If OpenStack is used, leave the list empty; VLAN IDs are retrieved from OpenStack networks using the Neutron API.
 # If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
@@ -423,6 +636,11 @@ traffic:
 # Can be overridden by --no-traffic
 no_traffic: false

+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
 # Test configuration

 # The rate in pps for traffic going in the reverse direction in case of unidirectional flow. Defaults to 1.
@@ -531,3 +749,24 @@ user_label:
 # Can be overridden by --no-vswitch-access
 # Should be left to the default value (false)
 no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-stream
+# Should be left to the default value (false)
+no_latency_streams: false
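+
+# A minimal sketch of a debugging-oriented override (hypothetical; these keys
+# simply override the defaults above when set in a user config file):
+# service_mode: true
+# no_flow_stats: true
+# no_latency_stats: true
+# no_latency_streams: true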