diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index bc40af4..d154318 100755
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -25,6 +25,11 @@
 # The only case where this field can be empty is when measuring a system that does not run
 # OpenStack, or when OpenStack APIs are not accessible, or when use of the OpenStack APIs is
 # not desirable. In that case the EXT service chain must be used.
+#
+# If the openrc user is not admin, the following parameters are mandatory and must be
+# filled with valid values in the config file:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
 openrc_file:

 # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
@@ -46,9 +51,9 @@ vm_image_file:
 # Otherwise, a new flavor will be created with attributes listed below.
 flavor_type: 'nfvbench.medium'

-# Custom flavor attributes
+# Custom flavor attributes for the test VM
 flavor:
-  # Number of vCPUs for the flavor
+  # Number of vCPUs for the flavor, must be at least 2!
   vcpus: 2
   # Memory for the flavor in MB
   ram: 4096
@@ -63,14 +68,38 @@ flavor:
     "hw:cpu_policy": dedicated
     "hw:mem_page_size": large

+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the test VM image will have the property that enables multiqueue
+# added (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured
+# for the VM.
+# By default there is only 1 queue per interface.
+# The maximum allowed number of queues per interface is 8, so the valid range for
+# this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the number
+# of vCPUs used - up to a max of 8 queues.
+# Setting it to a lower value than the vCPU count also works. For example, if using
+# 4 vCPUs and vif_multiqueue_size is set to 2, OpenStack will create 4 queues per
+# interface but the test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in
+# scenarios with a large number of interfaces and worker threads, or many physical
+# interfaces with multiple RSS queues. Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
 # Name of the availability zone to use for the test VMs
 # Must be one of the zones listed by 'nova availability-zone-list'
 # availability_zone: 'nova'
+# If the openrc user is not admin, set a valid value
 availability_zone:
 # To force placement on a given hypervisor, set the name here
 # (if multiple names are provided, the first will be used)
 # Leave empty to let openstack pick the hypervisor
 compute_nodes:
+# If the openrc user is not admin, set a valid hypervisor hostname here
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:

 # Type of service chain to run, possible options are PVP, PVVP and EXT
 # PVP - port to VM to port
@@ -134,10 +163,17 @@ traffic_generator:
    # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
    ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
    ip_addrs_step: 0.0.0.1
+
+   # `ip_src_static`: indicates whether the source IP remains constant or varies during
+   # traffic generation. Use True for a constant IP and False for varying IPs.
+   # default value is True
+   ip_src_static: True
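+
+   # Illustrative override (commented out; the values are examples only): vary the
+   # source IP within the same ranges using a random step:
+   # ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
+   # ip_addrs_step: 'random'
+   # ip_src_static: False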
+
    # `tg_gateway_ip_addrs`: base IP for traffic generator ports in the left and right networks to the VNFs
    # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
    # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
    tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+   tg_gateway_ip_cidrs: ['1.1.0.0/24','2.2.0.0/24']
    tg_gateway_ip_addrs_step: 0.0.0.1
    # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
    # must correspond to the public IP on the left and right networks
@@ -147,11 +183,41 @@ traffic_generator:
    # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
    gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
    gateway_ip_addrs_step: 0.0.0.1
+
+   # UDP DEFINED VARIABLES
+   # TRex picks the default UDP port (53), but ranges of UDP source and destination ports
+   # can also be defined in the configuration file using the following attributes:
+   #
    # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
    # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
+   # `udp_src_port` and `udp_dst_port` can be defined by a single port or a range. Example:
+   #   udp_src_port: 80
+   #   udp_dst_port: ['1024','65000']
+   # `udp_port_step`: the step between two generated ports, default is equal to '1'
+   #
+   # NOTICE:
+   # Because of the way TRex works, source port and destination port values are
+   # incremented and decremented simultaneously.
+   # So, in order to reach the highest possible number of flows, it is recommended that
+   # the sizes of the source port range and of the destination port range differ by 1,
+   # i.e.: |range[source_port] - range[destination_port]| = 1
    udp_src_port:
    udp_dst_port:
-
+   udp_port_step: '1'
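+
+   # Illustrative override (commented out; the port values are examples only). The two
+   # range sizes differ by 1 (1000 vs 1001 ports), which maximizes the number of flows:
+   # udp_src_port: ['10000', '10999']
+   # udp_dst_port: ['20000', '21000']
+   # udp_port_step: '1'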
+
+   # VxLAN only: optionally specify which VLAN tag to use for the VxLAN overlay
+   # This is used if the vxlan tunnels are running on a specific VLAN.
+   # Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
+   # for all VxLAN tunneled traffic
+   vtep_vlan:
+   # VxLAN and MPLS only: local/source VTEP IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
+   src_vteps:
+   # VxLAN only: IP address of the remote VTEP that terminates all tunnels originating from the local VTEPs
+   dst_vtep:
+   # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+   # This parameter is required to resolve the first next-hop MAC address if that next hop is not the final dst_vtep.
+   # This parameter is mandatory for MPLS only
+   vtep_gateway_ips:
    # L2 ADDRESSING OF UDP PACKETS
    # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
    # Leave empty for PVP, PVVP, EXT with ARP
@@ -182,7 +248,7 @@ traffic_generator:
    #
    # Generator profiles are listed in the following format:
    # `name`: Traffic generator profile name (use a unique name, no space or special character)
-   # DFo not change this field
+   # Do not change this field
    # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
    # Do not change this field
    # `ip`: IP address of the traffic generator.
@@ -195,6 +261,13 @@ traffic_generator:
    # software mode, therefore the performance of TRex will be significantly
    # lower. ONLY applies to trex-local.
    # Recommended to leave the default value (false)
+   # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB).
+   # Limits the amount of packet memory used (passed to DPDK as the -m argument).
+   # ONLY applies to trex-local.
+   # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+   # ONLY applies to trex-local.
+   # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+   # ONLY applies to trex-local.
    # `interfaces`: Configuration of traffic generator interfaces.
    # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
    # `interfaces.switch_port`: Leave empty (deprecated)
@@ -208,12 +281,33 @@ traffic_generator:
    # Do not use unless you want to override the speed discovered by the
    # traffic generator. Expected format: 10Gbps
    #
+   # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA node.
+   # See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+   # for more details.
+   # All `platform` sub-fields below are mandatory (with valid values) if the platform property is set.
+   # `platform.master_thread_id`: Hardware thread_id for the control thread.
+   # `platform.latency_thread_id`: Hardware thread_id for the RX thread.
+   # `platform.dual_if`: Section that defines info for the interface pairs (according to the order in the "interfaces" list).
+   # Each section, starting with "- socket", defines info for a different interface pair.
+   # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair.
+   # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair.
+   # Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+   # Example of values:
+   # platform:
+   #   master_thread_id: 0
+   #   latency_thread_id: 2
+   #   dual_if:
+   #     - socket: 0
+   #       threads: [1]
+   #
    generator_profile:
        - name: trex-local
          tool: TRex
          ip: 127.0.0.1
-         cores: 3
+         cores: 4
          software_mode: false
+         limit_memory: 1024
+         zmq_pub_port: 4500
+         zmq_rpc_port: 4501
          interfaces:
            - port: 0
              pci:
@@ -222,6 +316,37 @@ traffic_generator:
              pci:
              switch_port:
              intf_speed:
+         platform:
+           master_thread_id:
+           latency_thread_id:
+           dual_if:
+             - socket:
+               threads:
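+
+   # Illustrative filled-in profile (commented out) with hypothetical PCI addresses
+   # and NUMA pinning; the PCI addresses and thread ids below are examples only and
+   # must match the actual hardware:
+   # generator_profile:
+   #     - name: trex-local
+   #       tool: TRex
+   #       ip: 127.0.0.1
+   #       cores: 4
+   #       software_mode: false
+   #       interfaces:
+   #         - port: 0
+   #           pci: "0000:05:00.0"
+   #           switch_port:
+   #           intf_speed:
+   #         - port: 1
+   #           pci: "0000:05:00.1"
+   #           switch_port:
+   #           intf_speed:
+   #       platform:
+   #         master_thread_id: 0
+   #         latency_thread_id: 2
+   #         dual_if:
+   #           - socket: 0
+   #             threads: [1]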
+
+# Use 'true' to force a restart of the local TRex server before the next run
+# The local TRex server will be restarted even if this property is false when the
+# generator config changes between runs
+restart: false

+# Simple override for the TRex core count and mbuf multiplier factor
+# If empty, defaults to the value specified in generator_profile.cores
+cores:
+
+# Cache size for packet generation by the TRex field engine (FE).
+# More information on TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to the flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex
+# platform configuration (see TRex manual 6.2.2. Memory section configuration).
+# TRex uses one 64B mbuf per pre-built cached packet; assuming one pre-built cached packet
+# per flow, this means that for very large flow counts the number of configured mbuf_64
+# needs to be set accordingly.
+mbuf_64:
+
+# mbuf ratio to use for TRex (see the TRex documentation for more details)
+mbuf_factor: 0.2
+
+# A switch to disable hdrh (HDR histogram latency reporting)
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
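+
+# Illustrative tuning (commented out; the mbuf_64 value is an example only) for a
+# large flow count run: cache one pre-built packet per flow (cache_size < 0 means
+# "use the flow count") and size the 64B mbuf pool to match:
+# cache_size: -1
+# mbuf_64: 100000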

# -----------------------------------------------------------------------------
# These variables are not likely to be changed

@@ -238,21 +363,28 @@ generic_poll_sec: 2
 # name of the loop VM
 loop_vm_name: 'nfvbench-loop-vm'

-# Default names, subnets and CIDRs for PVP/PVVP networks
+# Default names, subnets and CIDRs for PVP/PVVP networks (openstack only)
+#
 # If a network with given name already exists it will be reused.
 # - PVP only uses left and right
 # - PVVP uses left, middle and right
 # - for EXT chains, this structure is not relevant - refer to external_networks
 # Otherwise a new internal network will be created with that name, subnet and CIDR.
 #
-# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
-# will be assigned by Neutron.
-# Must be unique for each network
+# network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
+# all 3 networks must use the same network type in this release
+# segmentation_id can be set to enforce a specific segmentation id (VLAN ID or VNI if vxlan)
+# by default (empty) the segmentation id will be assigned by Neutron.
+# If specified, it must be unique for each network
+# For multi-chaining, see the notes below
 # physical_network can be set to pick a specific physical network - by default (empty) the
 # default physical network will be picked
-# In the case of SR-IOV, both physical_network and segmentation ID must be provided
-# For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet
+# SR-IOV: both physical_network and VLAN segmentation ID must be provided
+# VxLAN: the VNI must generally be provided (except for special Neutron VxLAN implementations)
+#
+# For example to setup 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
 # names under left.physical_network and right.physical_network.
+# For multi-chaining and non-shared networks, see the notes below.
 # Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
 # using VLAN ID 2000 and 2001:
 # internal_networks:
@@ -262,6 +394,53 @@ loop_vm_name: 'nfvbench-loop-vm'
 #    right:
 #        segmentation_id: 2001
 #        physical_network: phys_sriov1
+#
+# For multi-chaining and non-shared network mode (VLAN, SRIOV, VxLAN, MPLS):
+# - the segmentation_id field, if provided, must be a list of values (as many as chains)
+# - segmentation_id auto-indexing:
+#   the segmentation_id field can also be a single value that represents the base value from which
+#   the value for each chain is derived using the chain ID as an offset. For example,
+#   if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
+#   The ranges of all the networks must not overlap.
+# - the physical_network can be a single name (all VFs to be allocated on the same physnet)
+#   or a list of physnet names to use different PFs
+#
+#   Example of 2-chain VLAN configuration:
+#   internal_networks:
+#      left:
+#          segmentation_id: [2000, 2001]
+#          physical_network: phys_sriov0
+#      right:
+#          segmentation_id: [2010, 2011]
+#          physical_network: phys_sriov1
+#   Equivalent to (using auto-indexing):
+#   internal_networks:
+#      left:
+#          segmentation_id: 2000
+#          physical_network: phys_sriov0
+#      right:
+#          segmentation_id: 2010
+#          physical_network: phys_sriov1
+#
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true)
+#   this parameter doesn't support auto-indexing because this is not a typical scenario
+#   a list of values in the range 256-1048575 is expected, one value per chain
+#
+# In the configuration example below, 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
+# Example of 2-chain MPLS configuration:
+# internal_networks:
+#    left:
+#        network_type: mpls
+#        segmentation_id: [2000, 2001]
+#        mpls_transport_labels: [10000, 10000]
+#        physical_network: phys_sriov0
+#    right:
+#        network_type: mpls
+#        segmentation_id: [2010, 2011]
+#        mpls_transport_labels: [11000, 11000]
+#        physical_network: phys_sriov1
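+#
+# Illustrative 2-chain VxLAN configuration (commented out; the VNI values are
+# examples only), using segmentation_id auto-indexing to derive one VNI per chain:
+# internal_networks:
+#    left:
+#        network_type: vxlan
+#        segmentation_id: 5000
+#    right:
+#        network_type: vxlan
+#        segmentation_id: 6000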

 internal_networks:
     left:
@@ -271,6 +450,7 @@ internal_networks:
         network_type: 'vlan'
         segmentation_id:
         physical_network:
+        mpls_transport_labels:
     right:
         name: 'nfvbench-rnet'
         subnet: 'nfvbench-rsubnet'
@@ -278,6 +458,7 @@ internal_networks:
         network_type: 'vlan'
         segmentation_id:
         physical_network:
+        mpls_transport_labels:
     middle:
         name: 'nfvbench-mnet'
         subnet: 'nfvbench-msubnet'
@@ -285,33 +466,187 @@ internal_networks:
         network_type: 'vlan'
        segmentation_id:
         physical_network:
+        mpls_transport_labels:
+
+# IDLE INTERFACES: PVP, PVVP and non-shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time; these interfaces will not carry any traffic and
+# can be used to test the impact of idle interfaces on the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+    # Prefix for all idle networks, the final name will append the chain ID and idle index
+    # e.g. "nfvbench-idle-net.0.4" for chain 0, idle index 4
+    name: 'nfvbench-idle-net'
+    # Subnet name to use for all idle subnetworks
+    subnet: 'nfvbench-idle-subnet'
+    # CIDR to use for all idle networks (value should not matter)
+    cidr: '192.169.1.0/24'
+    # Type of network associated to the idle virtual interfaces (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the idle virtual interfaces
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+    # For example, for 4 PVP chains and 8 idle interfaces per VM, NFVbench will use
+    # 32 consecutive values of segmentation ID starting from the value provided.
+    segmentation_id:
+    # physnet name to use for all idle interfaces
+    physical_network:
+
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface
+use_management_port: false
+
+# If a network with the given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+    name: 'nfvbench-management-net'
+    # Subnet name to use for the management subnetwork
+    subnet: 'nfvbench-management-subnet'
+    # CIDR to use for the management network
+    cidr: '192.168.0.0/24'
+    gateway: '192.168.0.254'
+    # Type of network associated to the management virtual interface (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the management virtual interface
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the management interface
+    physical_network:
+
+# Floating IP for the management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port
+# One floating IP per loop VM will be used (floating IPs are often limited;
+# use them in a limited context, mainly for debugging). If there are 10 PVP chains, this
+# will require 10 floating IPs. If 10 PVVP chains, it will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with the given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from this network.
+# Otherwise set the name, subnet and CIDR information from your floating IP pool network.
+# Floating network used to set a floating IP on the management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+    name: 'nfvbench-floating-net'
+    # Subnet name to use for the floating subnetwork
+    subnet: 'nfvbench-floating-subnet'
+    # CIDR to use for the floating network
+    cidr: '192.168.0.0/24'
+    # Type of network associated to the floating virtual interface (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the floating virtual interface
+    # vlan: leave empty to let neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the floating network
+    physical_network:
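+
+# Illustrative debugging override (commented out; the floating network values are
+# examples only): add a management interface to each loop VM and attach a floating
+# IP taken from a pre-existing floating IP pool network:
+# use_management_port: true
+# use_floating_ip: true
+# floating_network:
+#     name: 'public'
+#     subnet: 'public-subnet'
+#     cidr: '10.10.10.0/24'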

 # In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
 # handled in the middle network. The default (false) will use vswitch, while
 # SRIOV can be used by toggling the setting below.
 use_sriov_middle_net: false

-# EXT chain only. Prefix names of edge networks which will be used to send traffic via traffic generator.
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via the traffic generator.
 #
 # If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
 #
 # If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
-# An index will be appended to each network name to form the final name:
+# left and right can take either a string prefix or a list of arbitrary network names.
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+#   left:  'ext-lnet'
+#   right: 'ext-rnet'
 #   ext-lnet0 ext-rnet0 for chain #0
 #   ext-lnet1 ext-rnet1 for chain #1
 #   etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains.
+# Example:
+# external_networks:
+#   left:  ['ext-lnet', 'ext-lnet2']
+#   right: ['ext-rnet', 'ext-rnet2']
+#
 external_networks:
-    left: 'ext-lnet'
-    right: 'ext-rnet'
+    left:
+    right:

+# PVP with L3 router in the packet path only.
+# Only use when the l3_router option is True (see l3_router)
+# Prefix names of edge networks which will be used to send traffic via the traffic generator.
+# If a network with the given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+    left:
+        name: 'nfvbench-net2'
+        router_name: 'router_left'
+        subnet: 'nfvbench-subnet2'
+        cidr: '192.168.3.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:
+    right:
+        name: 'nfvbench-net3'
+        router_name: 'router_right'
+        subnet: 'nfvbench-subnet3'
+        cidr: '192.168.4.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:
+
+# Use 'true' to enable VxLAN encapsulation of the packets generated and sent by the traffic generator.
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'.
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false,
+# and vice versa.
+vxlan: false
+# Use 'true' to enable MPLS encapsulation of the packets generated and sent by the traffic generator.
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'.
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set
+# to 'false', and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to
+# 'true' because these features are not supported at the moment (when they are supported in the future,
+# they will require special NIC hardware). Only a 2-label stack is supported at the moment, where one
+# label is the transport label and the other is the VPN label; for more details please refer to
+# 'mpls_transport_labels' and 'segmentation_id' in the networks configuration.
+mpls: false
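+
+# Illustrative VxLAN enablement (commented out; the VTEP addresses below are
+# examples only): set 'vxlan: true', set the internal networks network_type to
+# 'vxlan', and configure the VTEPs in the traffic_generator section, e.g.:
+# vxlan: true
+# traffic_generator:
+#     vtep_vlan: 100
+#     src_vteps: ['10.1.1.230', '10.1.1.231']
+#     dst_vtep: '10.1.1.240'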
 # Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
 # Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
 # needed for example if VLAN tagging is enabled on the switch (access mode) or if you want to hook
 # directly to a NIC).
 # By default this is set to true (which is the nominal use case with TOR and trunk mode to TRex ports)
+# If VxLAN or MPLS are enabled, this option should be set to false (VLAN tagging for encapsulated packets
+# is not supported). Use the vtep_vlan option to enable VLAN tagging for the VxLAN overlay network.
 vlan_tagging: true

-# Used only in the case of EXT chain and no openstack to specify the VLAN IDs to use.
+# Used only in the case of EXT chain and no openstack or no admin access to specify the VLAN IDs to use.
 # This property is ignored when OpenStack is used or in the case of l2-loopback.
 # If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using the Neutron API.
 # If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
@@ -332,6 +667,11 @@ vlans: []
 # (see mac_addrs_left and mac_addrs_right)
 no_arp: false

+# The loop VM (VPP forwarder) can use ARP to discover the next-hop MAC address
+# False (default): do not send ARP but use the statically configured device MACs instead
+# (TRex gratuitous ARPs are not interpreted by VPP)
+# True: ARP requests are sent to find out the next-hop MAC addresses (for instance an SDN-GW)
+loop_vm_arp: false
+
 # Traffic Profiles
 # You can add more profiles here as needed
 # `l2frame_size` can be specified as any non-zero integer value to represent the size in bytes
@@ -358,6 +698,11 @@ traffic:
 # Can be overridden by --no-traffic
 no_traffic: false

+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
 # Test configuration

 # The rate pps for traffic going in reverse direction in case of unidirectional flow. Defaults to 1.
@@ -466,3 +811,24 @@ user_label:
 # Can be overridden by --no-vswitch-access
 # Should be left to the default value (false)
 no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-stream
+# Should be left to the default value (false)
+no_latency_streams: false
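+
+# Illustrative MPLS run override (commented out): as noted in the 'mpls' section
+# above, the flow stats and latency features must be disabled because they are not
+# supported with MPLS encapsulation at the moment:
+# mpls: true
+# no_flow_stats: true
+# no_latency_stats: true
+# no_latency_streams: true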