# NFVbench default configuration file
# This configuration file is ALWAYS loaded by NFVbench and should never be modified by users.
# To specify your own property values, always define them in a separate config file
# and pass that file to the script using -c or --config <file>
# Property values in that config file will override the default values in the current file
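#
# For example, a minimal user override file passed with -c could redefine just a few
# properties (an illustrative sketch only; the key names follow the --duration and
# --rate options described further down in this file):
# duration_sec: 30
# rate: ndr_pdr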
# IMPORTANT CUSTOMIZATION NOTES
# There are roughly 2 types of NFVbench config based on the OpenStack encaps used:
# - VLAN (OVS, OVS-DPDK, ML2/VPP)
# - VxLAN
# Many of the fields to customize are relevant to only 1 of the 2 encaps
# These will be clearly labeled "VxLAN only" or "VLAN only"
# Fields that are not applicable will not be used by NFVbench and can be left empty

# All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise.
# Fields that can be overridden at the command line are marked with the corresponding
# option, e.g. "--interval"
# The OpenStack openrc file to use - must be a valid full pathname. If running
# in a container, this path must be valid in the container.

# The only case where this field can be empty is when measuring a system that does not run
# OpenStack, or when the OpenStack APIs are not accessible, or when use of the OpenStack APIs is not
# desirable. In that case the EXT service chain must be used.

# If openrc is not admin, some parameters are mandatory and must be filled with valid values in the config file, such as:
# - hypervisor_hostname
# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']

# By default (empty) NFVbench will try to locate a VM image file
# from the package root directory named "nfvbench-<version>.qcow2" and
# upload that file. The image name will be "nfvbench-<version>"
# This can be overridden by specifying here a pathname of a file
# that follows the same naming convention.
# In most cases, this field should be left empty as the packaging should
# include the proper VM image file
# Name of the flavor to use for the loopback VMs

# If the provided name is an exact match to a flavor name known by OpenStack
# (as shown from 'nova flavor-list'), that flavor will be reused.
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'

# Custom flavor attributes for the test VM
# Number of vCPUs for the flavor, must be at least 2!
# Memory for the flavor in MB
# Size of local disk in GB
# metadata are supported and can be added if needed, optional
# note that if your openstack does not have NUMA optimization
# (cpu pinning and huge pages)
# you must comment out extra_specs completely otherwise
# loopback VM creation will fail
extra_specs:
    "hw:cpu_policy": dedicated
    "hw:mem_page_size": large
# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
# When enabled, the test VM image will have the property that enables
# multiqueue added (hw_vif_multiqueue_enabled='true').
# The number of queues per interface will be set to the number of vCPUs configured for
# the VM.
# By default there is only 1 queue per interface
# The max allowed number of queues per interface is 8.
# The valid range for this parameter is [1..min(8, vcpu_count)]
# When multiqueue is used the recommended setting is to set it to the same value as the
# number of vCPUs used - up to a max of 8 queues.
# Setting it to a lower value than the vCPU count should also work. For example if using 4 vCPUs and
# vif_multiqueue_size is set to 2, openstack will create 4 queues per interface but the
# test VM will only use the first 2 queues.
vif_multiqueue_size: 1
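# Example (illustrative) override for a 4-vCPU flavor, enabling one queue per vCPU:
# vif_multiqueue_size: 4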
# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
# number of interfaces and worker threads, or many physical interfaces with multiple RSS queues.
# Value is per CPU socket. Default is 16384.

# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
# If openrc is not admin, set a valid value

# To force placement on a given hypervisor, set the name here
# (if multiple names are provided, the first will be used)
# Leave empty to let openstack pick the hypervisor
# If openrc is not admin, set a valid value for the hypervisor hostname
# Example of value: hypervisor_hostname: "server1"
# Type of service chain to run, possible options are PVP, PVVP and EXT
# PVP - port to VM to port
# PVVP - port to VM to VM to port
# EXT - external chain used only for running traffic and checking traffic generator counters,
#       all other parts of the chain must be configured manually
# Can be overridden by --service-chain

# Total number of service chains, every chain has its own traffic stream
# Can be overridden by --service-chain-count
service_chain_count: 1

# Specifies if all chains share the same right/left/middle networks
service_chain_shared_net: false

# Total number of traffic flows for all chains and directions generated by the traffic generator.
# Minimum is '2 * service_chain_count'; it is automatically adjusted if too small
# a value was configured. Must be even.
# Every flow has packets with different IPs in headers
# Can be overridden by --flow-count
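# Example (illustrative; the key name follows the --flow-count option):
# flow_count: 10000
# i.e. 5000 flows per direction for a single chain.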
# Set to true if service chains should use SRIOV
# This requires SRIOV to be available on compute nodes

# Perform port to port loopback (direct or through switch)
# Should be used with the EXT service chain and no ARP (no_arp: true)
# When enabled, the vlans property must contain the same VLAN id for all chains.
# Can be overridden by --l2-loopback

# Resources created by NFVbench will not be removed
# Can be overridden by --no-cleanup
# Configuration for the traffic generator

# Name of the traffic generator, only for informational purposes
host_name: 'nfvbench_tg'
# this is the default traffic generator profile to use
# the name must be defined under generator_profile
# you can override the traffic generator to use using the
# -g or --traffic-gen option at the command line
default_profile: trex-local

# IP addresses for L3 traffic.
# This section describes the addresses to use to fill in the UDP packets sent by the
# traffic generator. If your VNFs are L2 forwarders, the fields below do not need to change.
# If your VNFs are L3 routers, the fields below must match the static routes in your VNFs
# so that UDP packets can be routed back to the peer port of the traffic generator.
# All of the IPs are used as the base for IP sequences computed based on chain or flow count.
# (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ...
# -(vnf-right)---(tg-gateway-right)---(sim-devices-right)

# `ip_addrs`: base IPs used as src and dst in the packet header, quantity depends on flow count
# these are used for addressing virtual devices simulated by the traffic generator
# and must be in a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs
# `ip_addrs_step`: step for generating the IP sequence. Use "random" for random patterns, default is 0.0.0.1.
ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
ip_addrs_step: 0.0.0.1

# `ip_src_static`: an attribute to specify the state of the source IP during traffic generation; it indicates whether
# the source IP varies or remains constant. Use True for a constant IP and False for varying IPs.
# default value is True

# `tg_gateway_ip_addrs`: base IPs for traffic generator ports in the left and right networks to the VNFs
# chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
# `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
tg_gateway_ip_addrs: ['192.168.1.100', '192.168.2.100']
tg_gateway_ip_cidrs: ['192.168.1.0/24','192.168.2.0/24']
tg_gateway_ip_addrs_step: 0.0.0.1
# `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
# must correspond to the public IP on the left and right networks
# for each left-most and right-most VNF of every chain.
# must be in the same subnet but not the same IP as tg_gateway_ip_addrs.
# chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
# `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
gateway_ip_addrs: ['192.168.1.1', '192.168.2.1']
gateway_ip_addrs_step: 0.0.0.1
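# Worked example (illustrative): with service_chain_count: 2 and the defaults above,
# chain 0 uses gateways 192.168.1.1 / 192.168.2.1 and chain 1 uses
# 192.168.1.2 / 192.168.2.2 (each chain is offset by gateway_ip_addrs_step).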
# UDP DEFINED VARIABLES
# TRex picks the default UDP port (53), but the ranges of UDP source and destination ports can also
# be defined in the configuration file by using the following attributes:
#
# `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
# `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
# `udp_src_port` and `udp_dst_port` can be defined by a single port or a range. Example:
# udp_dst_port: ['1024','65000']
# `udp_port_step`: the step between two generated ports, default is equal to '1'
#
# NOTICE: following TRex behavior, incrementation and decrementation of the source port and
# destination port values occur simultaneously.
# So, in order to reach the highest possible number of packets, it's recommended that the sizes of the
# source port range and the destination port range differ by 1 (two consecutive sizes are always coprime),
# i.e.: |range[source_port] - range[destination_port]| = 1
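# Worked example (illustrative): udp_src_port: ['1000','1009'] spans 10 values and
# udp_dst_port: ['2000','2008'] spans 9 values; since both counters step together,
# the pattern repeats after lcm(10, 9) = 90 packets, i.e. 90 distinct port pairs.
# With two ranges of 10 values each, it would repeat after only lcm(10, 10) = 10.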
# VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
# This is used if the vxlan tunnels are running on a specific VLAN.
# Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
# for all VxLAN tunneled traffic

# VxLAN and MPLS only: local/source VTEP IP addresses for ports 0 and 1 ['10.1.1.230', '10.1.1.231']

# VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs

# The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
# This parameter is required to resolve the first next-hop MAC address if the next-hop is not the final dst_vtep.
# This parameter is mandatory for MPLS only

# L2 ADDRESSING OF UDP PACKETS
# Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
# Leave empty for PVP, PVVP, EXT with ARP
# Only used when `service_chain` is EXT and `no_arp` is true.
# - If both lists are empty the far end MAC of the traffic generator will be used for left and right
#   (this is typically used to loop back on the first hop switch or using a loopback cable)
# - The length of each list must match the number of chains being used!
# - The index of each list must correspond to the chain index to ensure proper pairing.
# - Below is an example of using two chains:
#   - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00']
#   - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01']
#   UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and
#   dest MAC '00:00:00:00:02:00' for chain #1
#   UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and
#   dest MAC '00:00:00:00:02:01' for chain #1
# It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC
# of the looping UDP packet so that it can reach back to the peer port of the traffic
# generator.
# Traffic Generator Profiles
# In case you have multiple testbeds or traffic generators,
# you can define one traffic generator profile per testbed/traffic generator.
# In most cases you only need to fill in the pci address for the 2 ports used by the
# traffic generator and leave all other fields unchanged

# Generator profiles are listed in the following format:
# `name`: Traffic generator profile name (use a unique name, no space or special character)
#         Do not change this field
# `tool`: Traffic generator tool to be used (currently supported is `TRex`).
#         Do not change this field
# `ip`: IP address of the traffic generator.
#       The default loopback address is used when the traffic generator runs on the same host.
# `cores`: Specify the number of cores for running the TRex traffic generator.
#          ONLY applies to trex-local.
# `software_mode`: Advise TRex to use software mode, which provides the best compatibility. But
#                  note that TRex will not use any hardware acceleration technology in
#                  software mode, therefore the performance of TRex will be significantly
#                  lower. ONLY applies to trex-local.
#                  Recommended to leave the default value (false)
# `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limits the amount
#                 of packet memory used. (Passed to dpdk as the -m arg)
#                 ONLY applies to trex-local.
# `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
#                 ONLY applies to trex-local.
# `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
#                 ONLY applies to trex-local.
# `interfaces`: Configuration of traffic generator interfaces.
# `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
# `interfaces.switch_port`: Leave empty (deprecated)
# `interfaces.pci`: The PCI address of the Intel NIC interface associated to this port
#                   This field is required and cannot be empty
#                   Use lspci to list the PCI address of all devices
#                   Example of value: "0000:5e:00.0"
# `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
#               Empty value (default) to use the speed discovered by the traffic generator.
#               Recommended to leave this field empty.
#               Do not use unless you want to override the speed discovered by the
#               traffic generator. Expected format: 10Gbps

# `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA.
#             See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
# `platform.master_thread_id`: Hardware thread_id for the control thread. (A valid value is mandatory if the platform property is set)
# `platform.latency_thread_id`: Hardware thread_id for the RX thread. (A valid value is mandatory if the platform property is set)
# `platform.dual_if`: Section that defines info for the interface pairs (according to the order in the "interfaces" list). (A valid value is mandatory if the platform property is set)
#                     Each section, starting with "- socket", defines info for a different interface pair. (A valid value is mandatory if the platform property is set)
# `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (A valid value is mandatory if the platform property is set)
# `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (A valid value is mandatory if the platform property is set)
#                             Threads are pinned to cores, so specifying threads actually determines the hardware cores.
# Example of values:
# platform:
#   master_thread_id: 0
#   latency_thread_id: 2
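# (a sketch completing the dual_if part of the example above; the socket and thread
# ids are illustrative and must match your actual NUMA topology)
#   dual_if:
#     - socket: 0
#       threads: [1]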
# Use 'true' to force a restart of the local TRex server before the next run
# The TRex local server will be restarted even if the restart property is false, in case the generator config changed between runs

# Simpler override for the TRex core count and mbuf multiplier factor
# if empty, defaults to the one specified in generator_profile.cores

# Simpler override for the interface speed
# if empty, the current generator_profile.intf_speed parameter applies
# if value = 'auto' the auto-detection is forced

# 'cores' and 'intf_speed' parameters can be overridden themselves
# by the respective options --cores and --intf-speed on the command-line.

# By default, the real ports line rate is detected and used as
# the reference for computing the theoretical maximum traffic load (100%).
# Note that specifying 'intf_speed' allows one to artificially lower this
# reference while not modifying the actual transmission bit rate.

# The values of the following parameters are ignored on entry;
# they are defined here in order to appear in the reported configuration.
# They will reflect the values active at run-time (after overriding or detection)
# Add cache size in packet generation for the TRex field engine (FE).
# More information on TRex performance:
# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
# If cache_size = 0 (or empty): no cache will be used by TRex (default)
# If cache_size < 0: cache_size will be set to the flow count value

# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see the TRex manual, 6.2.2. Memory section configuration)
# Note that the resulting value is finally clipped to 10000, whatever the requested size is (by design limitation).

# TRex will use 1 x 64B mbuf per pre-built cached packet; assuming 1 pre-built cached packet per flow, this means that for a very large number of flows the number of configured mbuf_64 will need to be set accordingly.
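# Worked example (illustrative): with flow_count: 20000 and cache_size: -1, the requested
# cache size would be 20000 but is clipped to 10000, consuming about 10000 x 64B mbufs.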
# mbuf ratio to use for TRex (see the TRex documentation for more details)

# A switch to disable hdrh
# hdrh is enabled by default and requires TRex v2.58 or higher

# List of latency percentile values returned using hdrh
# elements should be int or float between 0.0 and 100.0
lat_percentiles: [25, 75, 99]
# -----------------------------------------------------------------------------
# These variables are not likely to be changed

# Number of seconds to wait for VMs to pass traffic in both directions
check_traffic_time_sec: 200

# General retry count
generic_retry_count: 100

# General poll period

# name of the loop VM
loop_vm_name: 'nfvbench-loop-vm'
# Default names, subnets and CIDRs for PVP/PVVP networks (openstack only)

# If a network with the given name already exists it will be reused.
# - PVP only uses left and right
# - PVVP uses left, middle and right
# - for EXT chains, this structure is not relevant - refer to external_networks
# Otherwise a new internal network will be created with that name, subnet and CIDR.

# network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
# all 3 networks must use the same network type in this release
# segmentation_id can be set to enforce a specific segmentation id (VLAN ID or VNI if vxlan)
# by default (empty) the segmentation id will be assigned by Neutron.
# If specified, it must be unique for each network
# For multi-chaining, see notes below
# physical_network can be set to pick a specific physical network - by default (empty) the
# default physical network will be picked
# SR-IOV: both physical_network and the VLAN segmentation ID must be provided
# VxLAN: the VNI must generally be provided (except for special Neutron VxLAN implementations)

# For example, to set up 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
# names under left.physical_network and right.physical_network.
# Example of an override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and
# phys_sriov1) using VLAN IDs 2000 and 2001 (multi-chaining with non-shared networks is
# covered by the notes further below):
#   left:
#     segmentation_id: 2000
#     physical_network: phys_sriov0
#   right:
#     segmentation_id: 2001
#     physical_network: phys_sriov1

# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN, MPLS):
# - the segmentation_id field, if provided, must be a list of values (as many as chains)
# - segmentation_id auto-indexing:
#   the segmentation_id field can also be a single value that represents the base value from which
#   values for each chain are derived using the chain ID as an offset. For example,
#   if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
#   The ranges of all the networks must not overlap.
# - the physical_network can be a single name (all VFs to be allocated on the same physnet)
#   or a list of physnet names to use different PFs

# Example of a 2-chain VLAN configuration:
#   left:
#     segmentation_id: [2000, 2001]
#     physical_network: phys_sriov0
#   right:
#     segmentation_id: [2010, 2011]
#     physical_network: phys_sriov1
# Equivalent to (using auto-indexing):
#   left:
#     segmentation_id: 2000
#     physical_network: phys_sriov0
#   right:
#     segmentation_id: 2010
#     physical_network: phys_sriov1

# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true)
#   this parameter doesn't support auto-indexing because this is not a typical scenario;
#   a list of values in the range 256-1048575 is expected, one value per chain

# In the configuration example below, 'segmentation_id' contains the inner MPLS label for each chain
# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
# Example of a 2-chain MPLS configuration:
#   left:
#     segmentation_id: [2000, 2001]
#     mpls_transport_labels: [10000, 10000]
#     physical_network: phys_sriov0
#   right:
#     segmentation_id: [2010, 2011]
#     mpls_transport_labels: [11000, 11000]
#     physical_network: phys_sriov1
internal_networks:
  left:
    name: 'nfvbench-lnet'
    subnet: 'nfvbench-lsubnet'
    cidr: '192.168.1.0/24'
    mpls_transport_labels:
  right:
    name: 'nfvbench-rnet'
    subnet: 'nfvbench-rsubnet'
    cidr: '192.168.2.0/24'
    mpls_transport_labels:
  middle:
    name: 'nfvbench-mnet'
    subnet: 'nfvbench-msubnet'
    cidr: '192.168.3.0/24'
    mpls_transport_labels:
# IDLE INTERFACES: PVP, PVVP and non shared net only.
# By default each test VM will have 2 virtual interfaces for looping traffic.
# If service_chain_shared_net is false, additional virtual interfaces can be
# added at VM creation time; these interfaces will not carry any traffic and
# can be used to test the impact of idle interfaces on the overall performance.
# All these idle interfaces will use normal ports (not direct).
# Number of idle interfaces per VM (none by default)
idle_interfaces_per_vm: 0

# A new network is created for each idle interface.
# If service_chain_shared_net is true, the options below will be ignored
# and no idle interfaces will be added.
idle_networks:
  # Prefix for all idle networks, the final name will append the chain ID and idle index
  # e.g. "nfvbench-idle-net.0.4" chain 0 idle index 4
  name: 'nfvbench-idle-net'
  # Subnet name to use for all idle subnetworks
  subnet: 'nfvbench-idle-subnet'
  # CIDR to use for all idle networks (value should not matter)
  cidr: '192.169.1.0/24'
  # Type of network associated to the idle virtual interfaces (vlan or vxlan)
  # segmentation ID to use for the network attached to the idle virtual interfaces
  # vlan: leave empty to let neutron pick the segmentation ID
  # vxlan: must specify the starting VNI value to be used (cannot be empty)
  # Note that NFVbench will use as many consecutive segmentation IDs as needed.
  # For example, for 4 PVP chains and 8 idle
  # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
  # starting from the value provided.
  # physnet name to use for all idle interfaces
# MANAGEMENT INTERFACE
# By default each test VM will have 2 virtual interfaces for looping traffic.
# If use_management_port is true, an additional virtual interface can be
# added at VM creation time; this interface will be used for VM management over SSH.
# This is helpful for debugging (forwarder config, traffic capture...)
# or to emulate a VNF with a management interface
use_management_port: false

# If a network with the given name already exists it will be reused.
# Otherwise a new network is created for the management interface.
# If use_management_port is false, the options below will be ignored
# and no management interface will be added.
management_network:
  name: 'nfvbench-management-net'
  # Subnet name to use for the management subnetwork
  subnet: 'nfvbench-management-subnet'
  # CIDR to use for the management network
  cidr: '192.168.0.0/24'
  gateway: '192.168.0.254'
  # Type of network associated to the management virtual interface (vlan or vxlan)
  # segmentation ID to use for the network attached to the management virtual interface
  # vlan: leave empty to let neutron pick the segmentation ID
  # vxlan: must specify the starting VNI value to be used (cannot be empty)
  # physnet name to use for the management interface

# Floating IP for the management interface
# If use_floating_ip is true, a floating IP will be set on the management interface port
# One floating IP per loop VM will be used (floating IPs are often limited;
# use them in a limited context, mainly for debugging). If there are 10 PVP chains, this will require 10
# floating IPs. If 10 PVVP chains, it will require 20 floating IPs
use_floating_ip: false

# If a network with the given name already exists it will be reused.
# Set the same name as management_network if you want to use a floating IP from this network.
# Otherwise set the name, subnet and CIDR information from your floating IP pool network.
# Floating network used to set a floating IP on the management port.
# Only 1 floating network will be used for all VMs and chains (shared network).
# If use_floating_ip is false, the options below will be ignored
# and no floating IP will be added.
floating_network:
  name: 'nfvbench-floating-net'
  # Subnet name to use for the floating subnetwork
  subnet: 'nfvbench-floating-subnet'
  # CIDR to use for the floating network
  cidr: '192.168.0.0/24'
  # Type of network associated to the floating virtual interface (vlan or vxlan)
  # segmentation ID to use for the network attached to the floating virtual interface
  # vlan: leave empty to let neutron pick the segmentation ID
  # vxlan: must specify the starting VNI value to be used (cannot be empty)
  # physnet name to use for the floating network
# In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
# handled in the middle network. The default (false) will use the vswitch, while
# SRIOV can be used by toggling the setting below.
use_sriov_middle_net: false

# EXT chain only. Prefix names of edge networks or a list of edge network names
# used to send traffic via the traffic generator.

# If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.

# If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
# left and right can take either a string prefix or a list of arbitrary network names
# If a string prefix is passed, an index will be appended to each network name to form the final name,
# e.g. with prefixes 'ext-lnet' and 'ext-rnet':
# ext-lnet0 ext-rnet0 for chain #0
# ext-lnet1 ext-rnet1 for chain #1

# If a list of strings is passed, each string in the list must be the name of the network used for the
# chain indexed by the entry position in the list.
# The list must have at least as many entries as there are chains. Example:
# left: ['ext-lnet', 'ext-lnet2']
# right: ['ext-rnet', 'ext-rnet2']
# PVP with L3 router in the packet path only.
# Only used when the l3_router option is True (see l3_router)
# Prefix names of edge networks which will be used to send traffic via the traffic generator.
# If a network with the given name already exists it will be reused.
# Otherwise a new edge network will be created with that name, subnet and CIDR.

# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks

# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
# will be assigned by Neutron.
# Must be unique for each network
# physical_network can be set to pick a specific physical network - by default (empty) the
# default physical network will be picked
edge_networks:
  left:
    name: 'nfvbench-net2'
    router_name: 'router_left'
    subnet: 'nfvbench-subnet2'
    cidr: '192.168.3.0/24'
  right:
    name: 'nfvbench-net3'
    router_name: 'router_right'
    subnet: 'nfvbench-subnet3'
    cidr: '192.168.4.0/24'
# Use 'true' to enable VxLAN encapsulation of the traffic generated and sent by the traffic generator
# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false

# Use 'true' to enable MPLS encapsulation of the traffic generated and sent by the traffic generator
# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'
# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set to 'false'
# and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to 'true' because these
# features are not supported at the moment. In the future, when these features are supported, they will require
# special NIC hardware. Only a 2-label stack is supported at the moment, where one label is transport and the other
# is VPN; for more details please refer to 'mpls_transport_labels' and 'segmentation_id' in the networks configuration

# Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
# needed for example if VLAN tagging is enabled on the switch (access mode) or if you want to hook
# directly to a NIC).
# By default this is set to true (which is the nominal use case with TOR and trunk mode to the TRex ports)
# If VxLAN or MPLS is enabled, this option should be set to false (VLAN tagging for encapsulated packets
# is not supported). Use the vtep_vlan option to enable VLAN tagging for the VxLAN overlay network.
# Used only in the case of an EXT chain with no openstack or no admin access, to specify the VLAN IDs to use.
# This property is ignored when OpenStack is used or in the case of l2-loopback.
# If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using the Neutron API.
# If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
# If networks are not shared across chains (service_chain_shared_net=false), the list should have
# one pair of VLAN IDs per chain
# In the special case of l2-loopback the list should have the same VLAN id for all chains
# Examples:
# [1998, 1999] left network uses vlan 1998, right network uses vlan 1999
# [[1,2],[3,4]] chain 0: left vlan 1, right vlan 2 - chain 1: left vlan 3, right vlan 4
# [1010, 1010] same VLAN id with l2-loopback enabled
# ARP is used to discover the MAC address of VNFs that run L3 routing.
# Used only with EXT chain.
# False (default): ARP requests are sent to find out dest MAC addresses.
# True: do not send ARP but use provisioned dest MACs instead
#       (see mac_addrs_left and mac_addrs_right)

# The loop VM (VPP forwarder) can use ARP to discover the next hop MAC address
# False (default): do not send ARP but use statically configured device MACs instead (TRex gratuitous ARPs are not interpreted by VPP)
# True: ARP requests are sent to find out next hop MAC addresses (for instance an SDN-GW)

# Traffic Profiles
# You can add more profiles here as needed
# `l2frame_size` can be specified as any non-zero integer value to represent the size in bytes
# of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1).
traffic_profile:
  - name: traffic_profile_64B
    l2frame_size: ['64']
  - name: traffic_profile_IMIX
    l2frame_size: ['IMIX']
  - name: traffic_profile_1518B
    l2frame_size: ['1518']
  - name: traffic_profile_3sizes
    l2frame_size: ['64', 'IMIX', '1518']
# Traffic Configuration
# bidirectional: to have traffic generated in both directions, set bidirectional to true
# profile: must be one of the profiles defined in traffic_profile
# The traffic profile can be overridden with the options --frame-size and --uni-dir
traffic:
  profile: traffic_profile_64B
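# Example (illustrative) of a user-config override selecting IMIX frames and
# unidirectional traffic (bidirectional is the boolean key described above):
# traffic:
#   bidirectional: false
#   profile: traffic_profile_IMIX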
# Check config and connectivity only - do not generate traffic
# Can be overridden by --no-traffic

# Use an L3 router in the packet path. If set, this option will create or reuse an openstack neutron
# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
# Can be overridden by --l3-router

# The rate in pps for traffic going in the reverse direction in case of unidirectional flow. Defaults to 1.
unidir_reverse_traffic_pps: 1

# The rate specifies if NFVbench should determine the NDR/PDR
# or if NFVbench should just generate traffic at a given fixed rate
# for a given duration (called "single run" mode)
# Supported rate formats:
# NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default)
# Or for single run mode:
# Packets per second: pps (e.g. `50pps`)
# Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`)
# Load percentage: % (e.g. `50%`)
# Can be overridden by --rate
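# Examples (illustrative; the key name follows the --rate option):
# rate: ndr_pdr
# rate: 10Gbps
# rate: 50%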
# Default run duration (single run at given rate only)
# Can be overridden by --duration

# Interval between intermediate reports when interval reporting is enabled
# Can be overridden by --interval

# Default pause between iterations of a binary search (NDR/PDR)
# NDR / PDR configuration

# Drop rates represent the ratio of dropped packets to the total number of packets sent.
# Values provided here are percentages. A value of 0.01 means that at most 0.01% of all
# packets sent are dropped (or 1 packet every 10,000 packets sent)

# No Drop Rate in percentage; defaults to 0.001%
# Partial Drop Rate in percentage; NDR should always be less than PDR

# The accuracy of NDR and PDR as a percentage of line rate; the exact NDR
# or PDR should be within `load_epsilon` line rate % of the one calculated.
# For example, with a value of 0.1 and a line rate of 10Gbps, the accuracy
# of NDR and PDR will be within 0.1% of 10Gbps, or 10Mbps.
# The lower the value, the more iterations and the longer it will take to find the NDR/PDR.
# In practice, due to the precision of the traffic generator, it is not recommended to
# set it lower than 0.1
# Location where to store results in JSON format. Must be a container-specific path.
# Can be overridden by --json

# Location where to store results in the NFVbench standard JSON format:
# <service-chain-type>-<service-chain-count>-<flow-count>-<packet-sizes>.json
# Example: PVP-1-10-64-IMIX.json
# Must be a container-specific path.
# Can be overridden by --std-json

# Prints debug messages (verbose mode)
# Can be overridden by --debug

# Set to a valid path name if logging to file is to be enabled
# Defaults to disabled
# When enabled, all results and/or logs will be sent to fluentd servers at the requested IPs and ports
# A list of one or more fluentd servers identified by their IPs and port numbers should be given.
# For each recipient it is possible to enable both sending logs and performance
# results, or enable either logs or performance results. To enable logs or results, logging_tag or
# result_tag should be set.

# by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
# to enable logging to fluentd, specify a valid fluentd tag name to be used for the
# log records

# by default (result_tag is empty) nfvbench results are not sent to fluentd
# to enable sending nfvbench results to fluentd, specify a valid fluentd tag name
# to be used for the results records, which must be different from logging_tag

# IP address of the server, defaults to loopback

# port # to use, by default, use the default fluentd forward port

# by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
# to enable logging to fluentd, specify a valid fluentd tag name to be used for the
# log records
# Module and class name of the factory which will be used to provide classes dynamically for other components.
factory_module: 'nfvbench.factory'
factory_class: 'BasicFactory'

# Custom label added for every perf record generated during this run.
# Can be overridden by --user-label

# Custom information to be passed to results post-processing;
# it will be included as is in the json report 'config' branch.
# Useful for documenting or automating further treatments.
# The value is any yaml object (=> open usage) - example:
#   user_info:
#     description:
#       attachment: direct
# Keys may be merged/overridden using the --user-info command line option
# (the command-line parameter value is expressed as a json object string)
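# Example (illustrative) of merging an extra key from the command line:
# nfvbench --user-info '{"status": "explore"}'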
# THESE FIELDS SHOULD BE USED VERY RARELY OR ON PURPOSE

# Skip vswitch configuration and retrieval of stats
# Can be overridden by --no-vswitch-access
# Should be left to the default value (false)
no_vswitch_access: false

# Enable service mode for traffic capture from the TRex console (for debugging purposes)
# Can be overridden by --service-mode
# Should be left to the default value (false)

# Disable extra flow stats (on high load traffic)
# Can be overridden by --no-flow-stats
# Should be left to the default value (false)

# Disable flow stats for latency traffic
# Can be overridden by --no-latency-stats
# Should be left to the default value (false)
no_latency_stats: false

# Disable latency measurements (no streams)
# Can be overridden by --no-latency-streams
# Should be left to the default value (false)
no_latency_streams: false

# Skip the "end to end" connectivity check on traffic setup
# Can be overridden by --no-e2e-check
# Should be left to the default value (false)
# This flag is usable for traffic generation only

# General purpose register (debugging flags)
# Can be overridden by --debug-mask
# Designed for development needs
# Hexadecimal notation (0x...) is accepted.
debug_mask: 0x00000000