X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=nfvbench%2Fcfg.default.yaml;h=97f98cdfbbcb65be152144b580318de130452456;hb=423f360415e2834dd8de065434023b822e2ca3f8;hp=a462383901b059d6563cb3fd4907c6fd20ab3a99;hpb=0ab04ab7103fc4bfb17042b60c1e005049375cc2;p=nfvbench.git

diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
old mode 100644
new mode 100755
index a462383..97f98cd
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -19,21 +19,29 @@
 # option, e.g. "--interval"
 
 
-# Name of the image to use for launching the loopback VMs. This name must be
-# the exact same name used in OpenStack (as shown from 'nova image-list')
-# Can be overridden by --image or -i
-image_name: 'nfvbenchvm'
+# The OpenStack openrc file to use - must be a valid full pathname. If running
+# in a container, this path must be valid in the container.
+#
+# The only case where this field can be empty is when measuring a system that does not run
+# OpenStack, or when the OpenStack APIs are not accessible, or when use of the OpenStack
+# APIs is not desirable. In that case the EXT service chain must be used.
+#
+# If the openrc user is not admin, some parameters are mandatory and must be filled in
+# with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+openrc_file:
 
 # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
 vm_forwarder: testpmd
 
-# NFVbench can automatically upload a VM image if the image named by
-# image_name is missing, for that you need to specify a file location where
-# the image can be retrieved
-#
-# To upload the image as a file, download it to preferred location
-# and prepend it with file:// like in this example:
-# file://
-# NFVbench (the image must have the same name as defined in image_name above).
+# By default (empty) NFVbench will try to locate a VM image file
+# from the package root directory named "nfvbench-<version>.qcow2" and
+# upload that file. The image name will be "nfvbench-<version>".
+# This can be overridden by specifying here the pathname of a file
+# that follows the same naming convention.
+# In most cases, this field should be left empty as the packaging should
+# include the proper VM image file.
 vm_image_file:
 
 # Name of the flavor to use for the loopback VMs
@@ -43,12 +51,12 @@ vm_image_file:
 # Otherwise, a new flavor will be created with attributes listed below.
 flavor_type: 'nfvbench.medium'
 
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
 flavor:
-  # Number of vCPUs for the flavor
+  # Number of vCPUs for the flavor, must be at least 2!
   vcpus: 2
   # Memory for the flavor in MB
-  ram: 8192
+  ram: 4096
   # Size of local disk in GB
   disk: 0
   # metadata are supported and can be added if needed, optional
@@ -60,43 +68,38 @@ flavor:
     "hw:cpu_policy": dedicated
     "hw:mem_page_size": large
 
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the test VM image will get the property that enables
+# multiqueue (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface.
+# The maximum number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting it to a lower value than the vCPU count should also work. For example, if using 4 vCPUs and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
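+
+# For illustration only, a multiqueue-enabled override sketch (the values below
+# are hypothetical and must match your own flavor definition): with a 4 vCPU
+# flavor, setting vif_multiqueue_size to 4 gives one queue per vCPU.
+#
+# flavor:
+#   vcpus: 4
+#   ram: 4096
+#   disk: 0
+# vif_multiqueue_size: 4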
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
 # Name of the availability zone to use for the test VMs
 # Must be one of the zones listed by 'nova availability-zone-list'
-# If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
-# application will use intra-node PVVP flow.
-# List of compute nodes can be specified, must be in given availability zone if not empty
-#availability_zone: 'nova'
+# availability_zone: 'nova'
+# If the openrc user is not admin, set a valid value
 availability_zone:
+# To force placement on a given hypervisor, set the name here
+# (if multiple names are provided, the first will be used)
+# Leave empty to let OpenStack pick the hypervisor
 compute_nodes:
-
-
-# Credentials for SSH connection to TOR switches.
-tor:
-  # Leave type empty or switch list empty to skip TOR switches configuration.
-  # Preferably use 'no_tor_access' to achieve the same behavior.
-  # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
-  # to the service chain under test, needed only if configured in access mode)
-  type:
-  # Switches are only needed if type is not empty.
-  # You can configure 0, 1 or 2 switches
-  # no switch: in this case NFVbench will not attempt to ssh to the switch
-  # and stitching of traffic must be done externally
-  # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
-  # 2 switches: this is the recommended setting wuth redundant switches, in this case each
-  # traffic generator interface must be wired to a different switch
-  switches:
-    - host:
-      username:
-      password:
-      port:
-
-# Skip TOR switch configuration and retrieving of stats
-# Can be overriden by --no-tor-access
-no_tor_access: false
-
-# Skip vswitch configuration and retrieving of stats
-# Can be overriden by --no-vswitch-access
-no_vswitch_access: false
+# If the openrc user is not admin, set a valid value for the hypervisor hostname
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:
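+
+# For illustration only, a sketch of the overrides needed when the openrc user
+# is not admin (all values below are hypothetical and site-specific; the vlans
+# property itself is defined further down in this file):
+#
+# availability_zone: 'nova'
+# hypervisor_hostname: "server1"
+# vlans: [1998, 1999]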
 
 # Type of service chain to run, possible options are PVP, PVVP and EXT
 # PVP - port to VM to port
@@ -110,24 +113,25 @@ service_chain: 'PVP'
 # Can be overridden by --service-chain-count
 service_chain_count: 1
 
+# Specifies if all chains share the same right/left/middle networks
+service_chain_shared_net: false
+
 # Total number of traffic flows for all chains and directions generated by the traffic generator.
 # Minimum is '2 * service_chain_count', it is automatically adjusted if too small
 # a value was configured. Must be even.
 # Every flow has packets with different IPs in headers
 # Can be overridden by --flow-count
-flow_count: 2
-
-# Used by PVVP chain to spawn VMs on different compute nodes
-# Can be overriden by --inter-node
-inter_node: false
+flow_count: 10000
 
 # set to true if service chains should use SRIOV
 # This requires SRIOV to be available on compute nodes
 sriov: false
 
-# Skip interfaces config on EXT service chain
-# Can be overriden by --no-int-config
-no_int_config: false
+# Perform port to port loopback (direct or through a switch)
+# Should be used with the EXT service chain and no ARP (no_arp: true)
+# When enabled, the vlans property must contain the same VLAN id for all chains.
+# Can be overridden by --l2-loopback
+l2_loopback: false
 
 # Resources created by NFVbench will not be removed
 # Can be overridden by --no-cleanup
@@ -144,59 +148,188 @@ traffic_generator:
     default_profile: trex-local
 
     # IP addresses for L3 traffic.
+    # This section describes the addresses used to fill in the UDP packets sent by the
+    # traffic generator. If your VNFs are L2 forwarders, the fields below do not need to change.
+    # If your VNFs are L3 routers, the fields below must match the static routes in your VNFs
+    # so that UDP packets can be routed back to the peer port of the traffic generator.
+    # All of these IPs are used as bases for IP sequences computed from the chain or flow count.
+    # (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ...
+    #                -(vnf-right)---(tg-gateway-right)---(sim-devices-right)
     #
     # `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
+    #            these are used for addressing virtual devices simulated by the traffic generator
+    #            and must be in a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs
    # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
-    # `tg_gateway_ip_addrs` base IPs for traffic generator ports, quantity depends on chain count
-    # `tg_gateway_ip_addrs__step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
-    # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
-    # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
-    # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
-    # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
    ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
    ip_addrs_step: 0.0.0.1
+    # `tg_gateway_ip_addrs`: base IPs for traffic generator ports in the left and right networks to the VNFs
+    #                       chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
+    # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
    tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+    tg_gateway_ip_cidrs: ['1.1.0.0/24','2.2.0.0/24']
    tg_gateway_ip_addrs_step: 0.0.0.1
+    # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
+    #                     must correspond to the public IP on the left and right networks
+    #                     for each left-most and right-most VNF of every chain.
+    #                     must be in the same subnet but not the same IP as tg_gateway_ip_addrs.
+    #                     chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
+    # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
    gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
    gateway_ip_addrs_step: 0.0.0.1
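+    # For illustration only (hypothetical chain count): with service_chain_count: 2
+    # and the default bases and steps above, the traffic generator would use
+    # 1.1.0.100 and 1.1.0.101 as left-side gateway IPs and expect the VNF gateways
+    # at 1.1.0.2 and 1.1.0.3 (and likewise 2.2.0.100/101 and 2.2.0.2/3 on the right).
+    #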
+    # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
+    # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
    udp_src_port:
    udp_dst_port:
+    # VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
+    # This is used if the VxLAN tunnels are running on a specific VLAN.
+    # Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
+    # for all VxLAN tunneled traffic
+    vtep_vlan:
+    # VxLAN and MPLS only: local/source VTEP IP addresses for ports 0 and 1, e.g. ['10.1.1.230', '10.1.1.231']
+    src_vteps:
+    # VxLAN only: IP address of the remote VTEP that terminates all tunnels originating from the local VTEPs
+    dst_vtep:
+    # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+    # This parameter is required to resolve the first next-hop MAC address if the next hop is not the final dst_vtep.
+    # This parameter is mandatory for MPLS only
+    vtep_gateway_ips:
+    # L2 ADDRESSING OF UDP PACKETS
+    # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
+    # Leave empty for PVP, PVVP, or EXT with ARP
+    # Only used when `service_chain` is EXT and `no_arp` is true.
+    # - If both lists are empty the far end MAC of the traffic generator will be used for left and right
+    #   (this is typically used to loop back on the first hop switch or using a loopback cable)
+    # - The length of each list must match the number of chains being used!
+    # - The index of each list must correspond to the chain index to ensure proper pairing.
+    # - Below is an example of using two chains:
+    #   - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00']
+    #   - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01']
+    #   UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and
+    #   dest MAC '00:00:00:00:02:00' for chain #1
+    #   UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and
+    #   dest MAC '00:00:00:00:02:01' for chain #1
+    #   It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC
+    #   of the looping UDP packet so that it can reach back to the peer port of the traffic
+    #   generator.
+    #
+    mac_addrs_left:
+    mac_addrs_right:
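+
+    # For illustration only, a VxLAN underlay sketch (the VLAN tag and the remote
+    # VTEP address are hypothetical; 10.1.1.230/231 are the sample values quoted
+    # above): the local VTEPs on ports 0 and 1 tunnel to one remote VTEP, with the
+    # overlay tagged on VLAN 100.
+    #
+    # vtep_vlan: 100
+    # src_vteps: ['10.1.1.230', '10.1.1.231']
+    # dst_vtep: 10.1.1.240
+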
     # Traffic Generator Profiles
     # In case you have multiple testbeds or traffic generators,
     # you can define one traffic generator profile per testbed/traffic generator.
+    # In most cases you only need to fill in the PCI addresses for the 2 ports used by the
+    # traffic generator and leave all other fields unchanged.
     #
     # Generator profiles are listed in the following format:
     # `name`: Traffic generator profile name (use a unique name, no space or special character)
+    #         Do not change this field
    # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
+    #         Do not change this field
    # `ip`: IP address of the traffic generator.
-    # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
+    #       The default loopback address is used when the traffic generator runs on the same host
+    #       as NFVbench.
+    # `cores`: Specify the number of cores for running the TRex traffic generator.
+    #          ONLY applies to trex-local.
+    # `software_mode`: Advise TRex to use software mode, which provides the best compatibility. But
+    #                  note that TRex will not use any hardware acceleration technology in
+    #                  software mode, therefore the performance of TRex will be significantly
+    #                  lower. ONLY applies to trex-local.
+    #                  Recommended to leave the default value (false)
+    # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limits the amount
+    #                 of packet memory used. (Passed to DPDK as the -m arg)
+    #                 ONLY applies to trex-local.
+    # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+    #                 ONLY applies to trex-local.
+    # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+    #                 ONLY applies to trex-local.
    # `interfaces`: Configuration of traffic generator interfaces.
    # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
-    # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
+    # `interfaces.switch_port`: Leave empty (deprecated)
    # `interfaces.pci`: The PCI address of the Intel NIC interface associated to this port
+    #                   This field is required and cannot be empty
+    #                   Use lspci to list the PCI addresses of all devices
+    #                   Example of value: "0000:5e:00.0"
    # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
+    #               Empty value (default) to use the speed discovered by the traffic generator.
+    #               Recommended to leave this field empty.
+    #               Do not use unless you want to override the speed discovered by the
+    #               traffic generator. Expected format: 10Gbps
+    #
+    # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA node.
+    #             See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+    #             for more details
+    # `platform.master_thread_id`: Hardware thread_id for the control thread. (Valid value is mandatory if the platform property is set)
+    # `platform.latency_thread_id`: Hardware thread_id for the RX thread. (Valid value is mandatory if the platform property is set)
+    # `platform.dual_if`: Section that defines info for the interface pairs (according to the order in the “interfaces” list). (Valid value is mandatory if the platform property is set)
+    #                     Each section, starting with “- socket”, defines info for a different interface pair. (Valid value is mandatory if the platform property is set)
+    # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if the platform property is set)
+    # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if the platform property is set)
+    #                             Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+    # Example of values:
+    # platform:
+    #   master_thread_id: 0
+    #   latency_thread_id: 2
+    #   dual_if:
+    #     - socket: 0
+    #       threads: [1]
    #
    generator_profile:
        - name: trex-local
          tool: TRex
          ip: 127.0.0.1
-          cores: 3
+          cores: 4
+          software_mode: false
+          limit_memory: 1024
+          zmq_pub_port: 4500
+          zmq_rpc_port: 4501
          interfaces:
            - port: 0
-              switch_port:
              pci:
-            - port: 1
              switch_port:
+            - port: 1
              pci:
-          intf_speed: 10Gbps
+              switch_port:
+          intf_speed:
+          platform:
+            master_thread_id:
+            latency_thread_id:
+            dual_if:
+              - socket:
+                threads:
+
+# Use 'true' to force a restart of the local TRex server before the next run
+# The local TRex server will be restarted even if the restart property is false when the generator config changes between runs
+restart: false
+
+# Simpler override for the TRex core count and mbuf multiplier factor
+# If empty, defaults to the value specified in generator_profile.cores
+cores:
+
+# Add cache size in packet generation for the TRex field engine (FE).
+# More information on TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to the flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see TRex manual 6.2.2. Memory section configuration)
+# TRex will use one 64B mbuf per pre-built cached packet; assuming one pre-built cached packet per flow, this means that
+# for a very large number of flows the configured mbuf_64 value will need to be set accordingly.
+mbuf_64:
+
+# mbuf ratio to use for TRex (see the TRex documentation for more details)
+mbuf_factor: 0.2
+
+# A switch to disable hdrh (HdrHistogram latency reporting)
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
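+
+# For illustration only, a tuning sketch for a large flow count (hypothetical
+# values, assuming flow_count: 10000 as set earlier in this file): cache_size < 0
+# makes the cache size equal to the flow count, so mbuf_64 must be large enough
+# to hold one pre-built 64B mbuf per flow.
+#
+# cores: 8
+# cache_size: -1
+# mbuf_64: 16384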
 
 # -----------------------------------------------------------------------------
 # These variables are not likely to be changed
 
-# The openrc file
-openrc_file:
+# Number of seconds to wait for VMs to pass traffic in both directions
+check_traffic_time_sec: 200
 
 # General retry count
 generic_retry_count: 100
@@ -207,21 +340,28 @@ generic_poll_sec: 2
 # name of the loop VM
 loop_vm_name: 'nfvbench-loop-vm'
 
-# Default names, subnets and CIDRs for PVP/PVVP networks
+# Default names, subnets and CIDRs for PVP/PVVP networks (OpenStack only)
+#
 # If a network with given name already exists it will be reused.
 # - PVP only uses left and right
 # - PVVP uses left, middle and right
 # - for EXT chains, this structure is not relevant - refer to external_networks
 # Otherwise a new internal network will be created with that name, subnet and CIDR.
-#
-# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
-# will be assigned by Neutron.
-# Must be unique for each network
+#
+# network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
+#              all 3 networks must use the same network type in this release
+# segmentation_id can be set to enforce a specific segmentation id (VLAN ID or VNI if vxlan)
+#                 by default (empty) the segmentation id will be assigned by Neutron.
+#                 If specified, it must be unique for each network
+#                 For multi-chaining, see the notes below
 # physical_network can be set to pick a specific physical network - by default (empty) the
 #                  default physical network will be picked
-# In the case of SR-IOV, both physical_network and segmentation ID must be provided
-# For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet
+# SR-IOV: both physical_network and VLAN segmentation ID must be provided
+# VxLAN: the VNI must generally be provided (except for special Neutron VxLAN implementations)
+#
+# For example to setup 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
 # names under left.physical_network and right.physical_network.
+# For multi-chaining and non-shared networks, see the notes further below.
 # Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
 # using VLAN ID 2000 and 2001:
 # internal_networks:
@@ -231,50 +371,284 @@ loop_vm_name: 'nfvbench-loop-vm'
 #    right:
 #      segmentation_id: 2001
 #      physical_network: phys_sriov1
+#
+# For multi-chaining and non-shared network mode (VLAN, SRIOV, VxLAN, MPLS):
+# - the segmentation_id field, if provided, must be a list of values (as many as chains)
+# - segmentation_id auto-indexing:
+#   the segmentation_id field can also be a single value that represents the base value from which
+#   the value for each chain is derived using the chain ID as an offset. For example
+#   if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
+#   The ranges of all the networks must not overlap.
+# - the physical_network can be a single name (all VFs to be allocated on the same physnet)
+#   or a list of physnet names to use different PFs
+#
+#   Example of 2-chain VLAN configuration:
+#   internal_networks:
+#      left:
+#        segmentation_id: [2000, 2001]
+#        physical_network: phys_sriov0
+#      right:
+#        segmentation_id: [2010, 2011]
+#        physical_network: phys_sriov1
+#   Equivalent to (using auto-indexing):
+#   internal_networks:
+#      left:
+#        segmentation_id: 2000
+#        physical_network: phys_sriov0
+#      right:
+#        segmentation_id: 2010
+#        physical_network: phys_sriov1
+#
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true)
+#   this parameter doesn't support auto-indexing because this is not a typical scenario
+#   a list of values in the range 256-1048575 is expected, one value per chain
+#
+# In the configuration example below, 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
+# Example of 2-chain MPLS configuration:
+#   internal_networks:
+#      left:
+#        network_type: mpls
+#        segmentation_id: [2000, 2001]
+#        mpls_transport_labels: [10000, 10000]
+#        physical_network: phys_sriov0
+#      right:
+#        network_type: mpls
+#        segmentation_id: [2010, 2011]
+#        mpls_transport_labels: [11000, 11000]
+#        physical_network: phys_sriov1
+
 internal_networks:
    left:
-        name: 'nfvbench-net0'
-        subnet: 'nfvbench-subnet0'
+        name: 'nfvbench-lnet'
+        subnet: 'nfvbench-lsubnet'
        cidr: '192.168.1.0/24'
        network_type: 'vlan'
        segmentation_id:
        physical_network:
+        mpls_transport_labels:
    right:
-        name: 'nfvbench-net1'
-        subnet: 'nfvbench-subnet1'
+        name: 'nfvbench-rnet'
+        subnet: 'nfvbench-rsubnet'
        cidr: '192.168.2.0/24'
        network_type: 'vlan'
        segmentation_id:
        physical_network:
+        mpls_transport_labels:
    middle:
-        name: 'nfvbench-net2'
-        subnet: 'nfvbench-subnet2'
+        name: 'nfvbench-mnet'
+        subnet: 'nfvbench-msubnet'
        cidr: '192.168.3.0/24'
        network_type: 'vlan'
        segmentation_id:
        physical_network:
+        mpls_transport_labels:
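+
+# For illustration only, a VxLAN variant of the internal networks (the VNI base
+# values are hypothetical; with auto-indexing as described above, chain 0 gets
+# VNI 5000/6000, chain 1 gets 5001/6001, and so on - note that all 3 networks
+# must use the same network type, left/right shown here for the PVP case):
+#
+# internal_networks:
+#    left:
+#      network_type: 'vxlan'
+#      segmentation_id: 5000
+#    right:
+#      network_type: 'vxlan'
+#      segmentation_id: 6000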
+
+# IDLE INTERFACES: PVP, PVVP and non-shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time; these interfaces will not carry any traffic and
+# can be used to test the impact of idle interfaces on the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+    # Prefix for all idle networks, the final name will append the chain ID and idle index
+    # e.g. "nfvbench-idle-net.0.4" for chain 0, idle index 4
+    name: 'nfvbench-idle-net'
+    # Subnet name to use for all idle subnetworks
+    subnet: 'nfvbench-idle-subnet'
+    # CIDR to use for all idle networks (value should not matter)
+    cidr: '192.169.1.0/24'
+    # Type of network associated to the idle virtual interfaces (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the idle virtual interfaces
+    # vlan: leave empty to let Neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+    # For example, for 4 PVP chains and 8 idle
+    # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
+    # starting from the value provided.
+    segmentation_id:
+    # physnet name to use for all idle interfaces
+    physical_network:
+
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This will be helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface
+use_management_port: false
 
-# EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
+# If a network with the given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+    name: 'nfvbench-management-net'
+    # Subnet name to use for the management subnetwork
+    subnet: 'nfvbench-management-subnet'
+    # CIDR to use for the management network
+    cidr: '192.168.0.0/24'
+    gateway: '192.168.0.254'
+    # Type of network associated to the management virtual interface (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the management virtual interface
+    # vlan: leave empty to let Neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the management interface
+    physical_network:
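+
+# For illustration only, a debugging sketch enabling SSH access to each loop VM
+# through a management port plus a floating IP (see use_floating_ip below; both
+# features are off by default):
+#
+# use_management_port: true
+# use_floating_ip: true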
+
+# Floating IP for the management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port
+# One floating IP per loop VM will be used (floating IPs are often limited,
+# use them in a limited context, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs. If 10 PVVP chains, it will require 20 floating IPs
+use_floating_ip: false
+
+# If a network with the given name already exists it will be reused.
+# Set the same name as management_network if you want to use a floating IP from this network.
+# Otherwise set the name, subnet and CIDR information from your floating IP pool network.
+# Floating network used to set the floating IP on the management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+    name: 'nfvbench-floating-net'
+    # Subnet name to use for the floating subnetwork
+    subnet: 'nfvbench-floating-subnet'
+    # CIDR to use for the floating network
+    cidr: '192.168.0.0/24'
+    # Type of network associated to the floating virtual interface (vlan or vxlan)
+    network_type: 'vlan'
+    # segmentation ID to use for the network attached to the floating virtual interface
+    # vlan: leave empty to let Neutron pick the segmentation ID
+    # vxlan: must specify the starting VNI value to be used (cannot be empty)
+    segmentation_id:
+    # physnet name to use for the floating network
+    physical_network:
+
+# In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
+# handled in the middle network. The default (false) will use the vswitch, while
+# SRIOV can be used by toggling the setting below.
+use_sriov_middle_net: false
 
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via the traffic generator.
+#
+# If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
+#
+# If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
+# left and right can take either a string prefix or a list of arbitrary network names.
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+#   left:  'ext-lnet'
+#   right: 'ext-rnet'
+#   ext-lnet0 ext-rnet0 for chain #0
+#   ext-lnet1 ext-rnet1 for chain #1
+#   etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains.
+# Example:
+# external_networks:
+#   left:  ['ext-lnet', 'ext-lnet2']
+#   right: ['ext-rnet', 'ext-rnet2']
+#
 external_networks:
-    left: 'nfvbench-net0'
-    right: 'nfvbench-net1'
+    left:
+    right:
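+
+# For illustration only, an EXT chain sketch with 2 chains and pre-existing,
+# non-shared networks (the names are hypothetical and follow the prefix
+# convention described above, so networks ext-lnet0/1 and ext-rnet0/1 must
+# already exist):
+#
+# service_chain: 'EXT'
+# service_chain_count: 2
+# external_networks:
+#     left: 'ext-lnet'
+#     right: 'ext-rnet'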
+
+# PVP with L3 router in the packet path only.
+# Only used when the l3_router option is true (see l3_router)
+# Prefix names of edge networks which will be used to send traffic via the traffic generator.
+# If a network with the given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+    left:
+        name: 'nfvbench-net2'
+        router_name: 'router_left'
+        subnet: 'nfvbench-subnet2'
+        cidr: '192.168.3.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:
+    right:
+        name: 'nfvbench-net3'
+        router_name: 'router_right'
+        subnet: 'nfvbench-subnet3'
+        cidr: '192.168.4.0/24'
+        gateway:
+        network_type:
+        segmentation_id:
+        physical_network:
 
-# Use 'true' to enable VLAN tagging of packets coming from traffic generator
-# Leave empty if VLAN tagging is enabled on switch or if you want to hook directly to a NIC
-# Else by default is set to true (which is the nominal use case with TOR and trunk mode to Trex)
+# Use 'true' to enable VxLAN encapsulation of the packets sent by the traffic generator
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'.
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false,
+# and vice versa
+vxlan: false
+# Use 'true' to enable MPLS encapsulation of the packets sent by the traffic generator
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'.
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set to 'false',
+# and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to 'true' because these
+# features are not supported at the moment. In the future, when these features are supported, they will require
+# special NIC hardware. Only a 2-label stack is supported at the moment, where one label is transport and the other
+# is VPN. For more details please refer to 'mpls_transport_labels' and 'segmentation_id' in the networks configuration
+mpls: false
+# Use 'true' to enable VLAN tagging of the packets generated and sent by the traffic generator
+# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
+# needed for example if VLAN tagging is enabled on the switch (access mode) or if you want to hook
+# directly to a NIC).
+# By default this is set to true (which is the nominal use case with a TOR and trunk mode to the TRex ports).
+# If VxLAN or MPLS is enabled, this option should be set to false (VLAN tagging for encapsulated packets
+# is not supported). Use the vtep_vlan option to enable VLAN tagging for the VxLAN overlay network.
 vlan_tagging: true
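+
+# For illustration only, a sketch of the flag combination for VxLAN-encapsulated
+# traffic (this assumes the internal networks and VTEP fields shown earlier in
+# this file have been set up for VxLAN; MPLS would be analogous with mpls: true):
+#
+# vxlan: true
+# mpls: false
+# vlan_tagging: false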
 
-# Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
-# Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
-# In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
-# Example: [1998, 1999]
+# Used only in the case of an EXT chain with no OpenStack or no admin access, to specify the VLAN IDs to use.
+# This property is ignored when OpenStack is used or in the case of l2-loopback.
+# If OpenStack is used leave the list empty, VLAN IDs are retrieved from the OpenStack networks using the Neutron API.
+# If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values.
+# If networks are not shared across chains (service_chain_shared_net=false), the list should have
+# 2 lists of VLAN IDs.
+# In the special case of l2-loopback the list should have the same VLAN id for all chains
+# Examples:
+#   [1998, 1999] left network uses vlan 1998, right network uses vlan 1999
+#   [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3, right vlan 4
+#   [1010, 1010] same VLAN id with l2-loopback enabled
+#
 vlans: []
 
-# Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
-# if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
+# ARP is used to discover the MAC address of VNFs that run L3 routing.
+# Used only with EXT chain.
+# False (default): ARP requests are sent to find out dest MAC addresses.
+# True: do not send ARP but use provisioned dest MACs instead
+#       (see mac_addrs_left and mac_addrs_right)
 no_arp: false
 
+# The loop VM (VPP forwarder) can use ARP to discover the next hop MAC address
+# False (default): do not send ARP but use statically configured device MACs instead (TRex gratuitous ARPs are not interpreted by VPP)
+# True: ARP requests are sent to find out next hop MAC addresses (for instance an SDN-GW)
+loop_vm_arp: false
+
 # Traffic Profiles
 # You can add here more profiles as needed
 # `l2frame_size` can be specified in any non-zero integer value to represent the size in bytes
@@ -301,9 +675,10 @@ traffic:
 # Can be overridden by --no-traffic
 no_traffic: false
 
-# Do not reset tx/rx counters prior to running
-# Can be overriden by --no-reset
-no_reset: false
+# Use an L3 router in the packet path. This option, if set, will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
 
 # Test configuration
 
@@ -330,6 +705,9 @@ duration_sec: 60
 # Can be overridden by --interval
 interval_sec: 10
 
+# Default pause between iterations of a binary search (NDR/PDR)
+pause_sec: 2
+
 # NDR / PDR configuration
 measurement:
     # Drop rates represent the ratio of dropped packets to the total number of packets sent.
@@ -343,7 +721,7 @@ measurement:
     # The accuracy of NDR and PDR as a percentage of line rate; the exact NDR
     # or PDR should be within `load_epsilon` line rate % from the one calculated.
    # For example, with a value 0.1, and a line rate of 10Gbps, the accuracy
-    # of NDR and PDR will be within 0.1% Of 10Gbps or 10Kbps.
+    # of NDR and PDR will be within 0.1% of 10Gbps or 10Mbps.
    # The lower the value the more iterations and the longer it will take to find the NDR/PDR.
    # In practice, due to the precision of the traffic generator it is not recommended to
    # set it to lower than 0.1
@@ -364,6 +742,70 @@ std_json:
 # Can be overridden by --debug
 debug: false
 
+# Set to a valid path name if logging to file is to be enabled
+# Defaults to disabled
+log_file:
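+
+# For illustration only (the path below is hypothetical):
+# log_file: /var/log/nfvbench.log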
+
+# When enabled, all results and/or logs will be sent to the fluentd servers at the requested IPs and ports
+# A list of one or more fluentd servers identified by their IPs and port numbers should be given.
+# For each recipient it is possible to enable both sending logs and performance
+# results, or enable either logs or performance results. To enable logs or results, logging_tag or
+# result_tag should be set.
+
+fluentd:
+      # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
+      # to enable logging to fluentd, specify a valid fluentd tag name to be used for the
+      # log records
+  - logging_tag:
+
+      # by default (result_tag is empty) nfvbench results are not sent to fluentd
+      # to enable sending nfvbench results to fluentd, specify a valid fluentd tag name
+      # to be used for the results records, which is different than logging_tag
+    result_tag:
+
+      # IP address of the server, defaults to loopback
+    ip: 127.0.0.1
+
+      # port number to use; by default, use the default fluentd forward port
+    port: 24224
+
 # Module and class name of the factory which will be used to provide classes dynamically for other components.
 factory_module: 'nfvbench.factory'
 factory_class: 'BasicFactory'
+
+# Custom label added for every perf record generated during this run.
+# Can be overridden by --user-label
+user_label:
+
+
+# THESE FIELDS SHOULD BE USED VERY RARELY
+
+# Skip vswitch configuration and retrieving of stats
+# Can be overridden by --no-vswitch-access
+# Should be left to the default value (false)
+no_vswitch_access: false
+
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-streams
+# Should be left to the default value (false)
+no_latency_streams: false
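+
+# For illustration only, the minimal set of overrides a typical OpenStack run
+# needs on top of these defaults (values are hypothetical; as noted above, the
+# PCI addresses of the 2 traffic generator ports are the main required fields):
+#
+# openrc_file: /root/openrc
+# traffic_generator:
+#     generator_profile:
+#         - name: trex-local
+#           tool: TRex
+#           ip: 127.0.0.1
+#           cores: 4
+#           interfaces:
+#             - port: 0
+#               pci: "0000:5e:00.0"
+#             - port: 1
+#               pci: "0000:5e:00.1"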