2 # NFVbench default configuration file
4 # This configuration file is ALWAYS loaded by NFVbench and should never be modified by users.
5 # To specify your own property values, always define them in a separate config file
6 # and pass that file to the script using -c or --config <file>
7 # Property values in that config file will override the default values in the current file
10 # IMPORTANT CUSTOMIZATION NOTES
11 # There are roughly 2 types of NFVbench config based on the OpenStack encaps used:
12 # - VLAN (OVS, OVS-DPDK, ML2/VPP)
12 # - VxLAN (VxLAN overlay networks)
13 # Many of the fields to customize are relevant to only 1 of the 2 encaps
14 # These will be clearly labeled "VxLAN only" or "VLAN only"
15 # Fields that are not applicable will not be used by NFVbench and can be left empty
17 # All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise.
18 # Fields that can be overridden at the command line are marked with the corresponding
19 # option, e.g. "--interval"
22 # The OpenStack openrc file to use - must be a valid full pathname. If running
23 # in a container, this path must be valid in the container.
25 # The only case where this field can be empty is when measuring a system that does not run
26 # OpenStack or when OpenStack APIs are not accessible or when use of OpenStack APIs is not
27 # desirable. In that case the EXT service chain must be used.
29 # If openrc is not admin some parameters are mandatory and must be filled with valid values in config file such as :
31 # - hypervisor_hostname
35 # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
38 # By default (empty) NFVbench will try to locate a VM image file
39 # from the package root directory named "nfvbench-<version>.qcow2" and
40 # upload that file. The image name will be "nfvbench-<version>"
41 # This can be overridden by specifying here a pathname of a file
42 # that follows the same naming convention.
43 # In most cases, this field should be left empty as the packaging should
44 # include the proper VM image file
47 # Name of the flavor to use for the loopback VMs
49 # If the provided name is an exact match to a flavor name known by OpenStack
50 # (as shown from 'nova flavor-list'), that flavor will be reused.
51 # Otherwise, a new flavor will be created with attributes listed below.
52 flavor_type: 'nfvbench.medium'
54 # Custom flavor attributes for the test VM
56 # Number of vCPUs for the flavor, must be at least 2!
58 # Memory for the flavor in MB
60 # Size of local disk in GB
62 # metadata are supported and can be added if needed, optional
63 # note that if your openstack does not have NUMA optimization
64 # (cpu pinning and huge pages)
65 # you must comment out extra_specs completely otherwise
66 # loopback VM creation will fail
68 "hw:cpu_policy": dedicated
69 "hw:mem_page_size": large
71 # Enable multiqueue for all test VM interfaces (PVP and PVVP only).
72 # When enabled, the test VM image will get added the property to enable
73 # multiqueue (hw_vif_multiqueue_enabled='true').
74 # The number of queues per interface will be set to the number of vCPUs configured for
76 # By default there is only 1 queue per interface
77 # The max allowed queue per interface is 8.
78 # The valid range for this parameter is [1..min(8, vcpu_count)]
79 # When multiqueue is used the recommended setting is to set it to the same value as the
80 # number of vCPU used - up to a max of 8 queues.
81 # Setting to a lower value than vCPU should also work. For example if using 4 vCPU and
82 # vif_multiqueue_size is set to 2, openstack will create 4 queues per interface but the
83 # test VM will only use the first 2 queues.
84 vif_multiqueue_size: 1
86 # Name of the availability zone to use for the test VMs
87 # Must be one of the zones listed by 'nova availability-zone-list'
88 # availability_zone: 'nova'
89 # If openrc is not admin set a valid value
91 # To force placement on a given hypervisor, set the name here
92 # (if multiple names are provided, the first will be used)
93 # Leave empty to let openstack pick the hypervisor
95 # If openrc is not admin set a valid value for hypervisor hostname
96 # Example of value: hypervisor_hostname: "server1"
99 # Type of service chain to run, possible options are PVP, PVVP and EXT
100 # PVP - port to VM to port
101 # PVVP - port to VM to VM to port
102 # EXT - external chain used only for running traffic and checking traffic generator counters,
103 # all other parts of chain must be configured manually
104 # Can be overridden by --service-chain
107 # Total number of service chains, every chain has own traffic stream
108 # Can be overridden by --service-chain-count
109 service_chain_count: 1
111 # Specifies if all chains share the same right/left/middle networks
112 service_chain_shared_net: false
114 # Total number of traffic flows for all chains and directions generated by the traffic generator.
115 # Minimum is '2 * service_chain_count', it is automatically adjusted if too small
116 # value was configured. Must be even.
117 # Every flow has packets with different IPs in headers
118 # Can be overridden by --flow-count
121 # set to true if service chains should use SRIOV
122 # This requires SRIOV to be available on compute nodes
125 # Perform port to port loopback (direct or through switch)
126 # Should be used with EXT service chain and no ARP (no_arp: true)
127 # When enabled, the vlans property must contain the same VLAN id for all chains.
128 # Can be overridden by --l2-loopback
131 # Resources created by NFVbench will not be removed
132 # Can be overridden by --no-cleanup
135 # Configuration for traffic generator
137 # Name of the traffic generator, only for informational purposes
138 host_name: 'nfvbench_tg'
139 # this is the default traffic generator profile to use
140 # the name must be defined under generator_profile
141 # you can override the traffic generator to use using the
142 # -g or --traffic-gen option at the command line
143 default_profile: trex-local
145 # IP addresses for L3 traffic.
146 # This section describes the addresses to use to fill in the UDP packets sent by the
147 # traffic generator. If your VNFs are L2 forwarders, these fields below do not need to change.
148 # If your VNFs are L3 routers, the fields below must match the static routes in your VNFs
149 # so that UDP packets can be routed back to the peer port of the traffic generator.
151 # All of the IPs are used as base for IP sequence computed based on chain or flow count.
152 # (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ...
153 # -(vnf-right)---(tg-gateway-right)---(sim-devices-right)
155 # `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
156 # these are used for addressing virtual devices simulated by the traffic generator
157 # and must be in a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs
158 # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
159 ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
160 ip_addrs_step: 0.0.0.1
161 # `tg_gateway_ip_addrs` base IP for traffic generator ports in the left and right networks to the VNFs
162 # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
163 # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
164 tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
165 tg_gateway_ip_addrs_step: 0.0.0.1
166 # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
167 # must correspond to the public IP on the left and right networks
168 # for each left-most and right-most VNF of every chain.
169 # must be the same subnet but not same IP as tg_gateway_ip_addrs.
170 # chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
171 # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
172 gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
173 gateway_ip_addrs_step: 0.0.0.1
174 # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
175 # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
179 # VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
180 # This is used if the vxlan tunnels are running on a specific VLAN.
181 # Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
182 # for all VxLAN tunneled traffic
184 # VxLAN only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
186 # VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
189 # L2 ADDRESSING OF UDP PACKETS
190 # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
191 # Leave empty for PVP, PVVP, EXT with ARP
192 # Only used when `service_chain` is EXT and `no_arp` is true.
193 # - If both lists are empty the far end MAC of the traffic generator will be used for left and right
194 # (this is typically used to loop back on the first hop switch or using a loopback cable)
195 # - The length of each list must match the number of chains being used!
196 # - The index of each list must correspond to the chain index to ensure proper pairing.
197 # - Below is an example of using two chains:
198 # - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00']
199 # - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01']
200 # UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and
201 # dest MAC '00:00:00:00:02:00' for chain #1
202 # UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and
203 # dest MAC '00:00:00:00:02:01' for chain #1
204 # It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC
205 # of the looping UDP packet so that it can reach back to the peer port of the traffic
211 # Traffic Generator Profiles
212 # In case you have multiple testbeds or traffic generators,
213 # you can define one traffic generator profile per testbed/traffic generator.
214 # In most cases you only need to fill in the pci address for the 2 ports used by the
215 # traffic generator and leave all other fields unchanged
217 # Generator profiles are listed in the following format:
218 # `name`: Traffic generator profile name (use a unique name, no space or special character)
219 # Do not change this field
220 # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
221 # Do not change this field
222 # `ip`: IP address of the traffic generator.
223 # The default loopback address is used when the traffic generator runs on the same host
225 # `cores`: Specify the number of cores for running the TRex traffic generator.
226 # ONLY applies to trex-local.
227 # `software_mode`: Advise TRex to use software mode which provides the best compatibility. But
228 # note that TRex will not use any hardware acceleration technology under
229 # software mode, therefore the performance of TRex will be significantly
230 # lower. ONLY applies to trex-local.
231 # Recommended to leave the default value (false)
232 # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limit the amount
233 # of packet memory used. (Passed to dpdk as -m arg)
234 # ONLY applies to trex-local.
235 # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
236 # ONLY applies to trex-local.
237 # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
238 # ONLY applies to trex-local.
239 # `interfaces`: Configuration of traffic generator interfaces.
240 # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
241 # `interfaces.switch_port`: Leave empty (deprecated)
242 # `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
243 # This field is required and cannot be empty
244 # Use lspci to list the PCI address of all devices
245 # Example of value: "0000:5e:00.0"
246 # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
247 # Empty value (default) to use the speed discovered by the traffic generator.
248 # Recommended to leave this field empty.
249 # Do not use unless you want to override the speed discovered by the
250 # traffic generator. Expected format: 10Gbps
252 # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA.
253 # See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
255 # `platform.master_thread_id`: Hardware thread_id for control thread. (Valid value is mandatory if platform property is set)
256 # `platform.latency_thread_id`: Hardware thread_id for RX thread. (Valid value is mandatory if platform property is set)
257 # `platform.dual_if`: Section defines info for interface pairs (according to the order in “interfaces” list). (Valid value is mandatory if platform property is set)
258 # Each section, starting with “- socket” defines info for different interface pair. (Valid value is mandatory if platform property is set)
259 # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if platform property is set)
260 # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if platform property is set)
261 # Threads are pinned to cores, so specifying threads actually determines the hardware cores.
264 # master_thread_id: 0
265 # latency_thread_id: 2
294 # Use 'true' to force restart of local TRex server before next run
295 # TRex local server will be restarted even if restart property is false in case of generator config changes between runs
298 # Simpler override for trex core count and mbuf multiplier factor
299 # if empty defaults to the one specified in generator_profile.cores
302 # mbuffer ratio to use for TRex (see TRex documentation for more details)
305 # -----------------------------------------------------------------------------
306 # These variables are not likely to be changed
308 # Number of seconds to wait for VMs to pass traffic in both directions
309 check_traffic_time_sec: 200
311 # General retry count
312 generic_retry_count: 100
314 # General poll period
317 # name of the loop VM
318 loop_vm_name: 'nfvbench-loop-vm'
320 # Default names, subnets and CIDRs for PVP/PVVP networks (openstack only)
322 # If a network with given name already exists it will be reused.
323 # - PVP only uses left and right
324 # - PVVP uses left, middle and right
325 # - for EXT chains, this structure is not relevant - refer to external_networks
326 # Otherwise a new internal network will be created with that name, subnet and CIDR.
328 # network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
329 # all 3 networks must use the same network type in this release
330 # segmentation_id can be set to enforce a specific segmentation id (vlan ID or VNI if vxlan)
331 # by default (empty) the segmentation id will be assigned by Neutron.
332 # If specified, it must be unique for each network
333 # For multi-chaining, see notes below
334 # physical_network can be set to pick a specific physical network - by default (empty) the
335 # default physical network will be picked
336 # SR-IOV: both physical_network and VLAN segmentation ID must be provided
337 # VxLAN: the VNI must generally be provided (except special Neutron VxLAN implementations)
339 # For example to setup 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
340 # names under left.physical_network and right.physical_network.
341 # For multi-chaining and non shared networks,
342 # Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
343 # using VLAN ID 2000 and 2001:
346 # segmentation_id: 2000
347 # physical_network: phys_sriov0
349 # segmentation_id: 2001
350 # physical_network: phys_sriov1
352 # For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN):
353 # - the segmentation_id field if provided must be a list of values (as many as chains)
354 # - segmentation_id auto-indexing:
355 # the segmentation_id field can also be a single value that represents the base value from which
356 # values for each chain is derived using the chain ID as an offset. For example
357 # if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
358 # The ranges of all the networks must not overlap.
359 # - the physical_network can be a single name (all VFs to be allocated on same physnet)
360 # or a list of physnet names to use different PFs
362 # Example of 2-chain configuration:
365 # segmentation_id: [2000, 2001]
366 # physical_network: phys_sriov0
368 # segmentation_id: [2010, 2011]
369 # physical_network: phys_sriov1
371 # Equivalent to (using auto-indexing):
374 # segmentation_id: 2000
375 # physical_network: phys_sriov0
377 # segmentation_id: 2010
378 # physical_network: phys_sriov1
382 name: 'nfvbench-lnet'
383 subnet: 'nfvbench-lsubnet'
384 cidr: '192.168.1.0/24'
389 name: 'nfvbench-rnet'
390 subnet: 'nfvbench-rsubnet'
391 cidr: '192.168.2.0/24'
396 name: 'nfvbench-mnet'
397 subnet: 'nfvbench-msubnet'
398 cidr: '192.168.3.0/24'
403 # IDLE INTERFACES: PVP, PVVP and non shared net only.
404 # By default each test VM will have 2 virtual interfaces for looping traffic.
405 # If service_chain_shared_net is false, additional virtual interfaces can be
406 # added at VM creation time, these interfaces will not carry any traffic and
407 # can be used to test the impact of idle interfaces in the overall performance.
408 # All these idle interfaces will use normal ports (not direct).
409 # Number of idle interfaces per VM (none by default)
410 idle_interfaces_per_vm: 0
412 # A new network is created for each idle interface.
413 # If service_chain_shared_net is true, the options below will be ignored
414 # and no idle interfaces will be added.
416 # Prefix for all idle networks, the final name will append the chain ID and idle index
417 # e.g. "nfvbench-idle-net.0.4" chain 0 idle index 4
418 name: 'nfvbench-idle-net'
419 # Subnet name to use for all idle subnetworks
420 subnet: 'nfvbench-idle-subnet'
421 # CIDR to use for all idle networks (value should not matter)
422 cidr: '192.169.1.0/24'
423 # Type of network associated to the idle virtual interfaces (vlan or vxlan)
425 # segmentation ID to use for the network attached to the idle virtual interfaces
426 # vlan: leave empty to let neutron pick the segmentation ID
427 # vxlan: must specify the starting VNI value to be used (cannot be empty)
428 # Note that NFVbench will use as many consecutive segmentation IDs as needed.
429 # For example, for 4 PVP chains and 8 idle
430 # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
431 # starting from the value provided.
433 # physnet name to use for all idle interfaces
436 # In the scenario of PVVP + SRIOV, there is a choice of how the traffic will be
437 # handled in the middle network. The default (false) will use vswitch, while
438 # SRIOV can be used by toggling below setting.
439 use_sriov_middle_net: false
441 # EXT chain only. Prefix names of edge networks or list of edge network names
442 # used to send traffic via traffic generator.
444 # If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
446 # If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
447 # left and right can take either a string prefix or a list of arbitrary network names
448 # If a string prefix is passed, an index will be appended to each network name to form the final name.
453 # ext-lnet0 ext-rnet0 for chain #0
454 # ext-lnet1 ext-rnet1 for chain #1
456 # If a list of strings is passed, each string in the list must be the name of the network used for the
457 # chain indexed by the entry position in the list.
458 # The list must have at least as many entries as there are chains
461 # left: ['ext-lnet', 'ext-lnet2']
462 # right: ['ext-rnet', 'ext-rnet2']
468 # Use 'true' to enable VxLAN encapsulation of packets generated and sent by the traffic generator
469 # When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
472 # Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
473 # Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
474 # needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook
475 # directly to a NIC).
476 # By default is set to true (which is the nominal use case with TOR and trunk mode to Trex ports)
477 # If VxLAN is enabled, this option should be set to false (vlan tagging for encapsulated packets
478 # is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
481 # Used only in the case of EXT chain and no openstack or not admin access to specify the VLAN IDs to use.
482 # This property is ignored when OpenStack is used or in the case of l2-loopback.
483 # If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using Neutron API.
484 # If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
485 # If networks are not shared across chains (service_chain_shared_net=false), the list should have
487 # In the special case of l2-loopback the list should have the same VLAN id for all chains
489 # [1998, 1999] left network uses vlan 1998 right network uses vlan 1999
490 # [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3 right vlan 4
491 # [1010, 1010] same VLAN id with l2-loopback enabled
495 # ARP is used to discover the MAC address of VNFs that run L3 routing.
496 # Used only with EXT chain.
497 # False (default): ARP requests are sent to find out dest MAC addresses.
498 # True: do not send ARP but use provisioned dest macs instead
499 # (see mac_addrs_left and mac_addrs_right)
503 # You can add here more profiles as needed
504 # `l2frame_size` can be specified as any non-zero integer value to represent the size in bytes
505 # of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1).
507 - name: traffic_profile_64B
509 - name: traffic_profile_IMIX
510 l2frame_size: ['IMIX']
511 - name: traffic_profile_1518B
512 l2frame_size: ['1518']
513 - name: traffic_profile_3sizes
514 l2frame_size: ['64', 'IMIX', '1518']
516 # Traffic Configuration
517 # bidirectional: to have traffic generated from both direction, set bidirectional to true
518 # profile: must be one of the profiles defined in traffic_profile
519 # The traffic profile can be overridden with the options --frame-size and --uni-dir
522 profile: traffic_profile_64B
524 # Check config and connectivity only - do not generate traffic
525 # Can be overridden by --no-traffic
530 # The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1.
531 unidir_reverse_traffic_pps: 1
533 # The rate specifies if NFVbench should determine the NDR/PDR
534 # or if NFVbench should just generate traffic at a given fixed rate
535 # for a given duration (called "single run" mode)
536 # Supported rate format:
537 # NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default)
538 # Or for single run mode:
539 # Packet per second: pps (e.g. `50pps`)
540 # Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`)
541 # Load percentage: % (e.g. `50%`)
542 # Can be overridden by --rate
545 # Default run duration (single run at given rate only)
546 # Can be overridden by --duration
549 # Interval between intermediate reports when interval reporting is enabled
550 # Can be overridden by --interval
553 # Default pause between iterations of a binary search (NDR/PDR)
556 # NDR / PDR configuration
558 # Drop rates represent the ratio of dropped packet to the total number of packets sent.
559 # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all
560 # packets sent are dropped (or 1 packet every 10,000 packets sent)
562 # No Drop Rate in percentage; Default to 0.001%
564 # Partial Drop Rate in percentage; NDR should always be less than PDR
566 # The accuracy of NDR and PDR as a percentage of line rate; The exact NDR
567 # or PDR should be within `load_epsilon` line rate % from the one calculated.
568 # For example, with a value 0.1, and a line rate of 10Gbps, the accuracy
569 # of NDR and PDR will be within 0.1% of 10Gbps or 10Mbps.
570 # The lower the value the more iterations and the longer it will take to find the NDR/PDR.
571 # In practice, due to the precision of the traffic generator it is not recommended to
572 # set it to lower than 0.1
575 # Location where to store results in a JSON format. Must be container specific path.
576 # Can be overridden by --json
579 # Location where to store results in the NFVbench standard JSON format:
580 # <service-chain-type>-<service-chain-count>-<flow-count>-<packet-sizes>.json
581 # Example: PVP-1-10-64-IMIX.json
582 # Must be container specific path.
583 # Can be overridden by --std-json
586 # Prints debug messages (verbose mode)
587 # Can be overridden by --debug
590 # Set to a valid path name if logging to file is to be enabled
591 # Defaults to disabled
594 # When enabled, all results and/or logs will be sent to the fluentd servers at the requested IPs and ports
595 # A list of one or more fluentd servers identified by their IPs and port numbers should be given.
596 # For each recipient it is possible to enable both sending logs and performance
597 # results, or enable either logs or performance results. For enabling logs or results logging_tag or
598 # result_tag should be set.
601 # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
602 # to enable logging to fluentd, specify a valid fluentd tag name to be used for the
606 # by default (result_tag is empty) nfvbench results are not sent to fluentd
607 # to enable sending nfvbench results to fluentd, specify a valid fluentd tag name
608 # to be used for the results records, which is different than logging_tag
611 # IP address of the server, defaults to loopback
614 # port # to use, by default, use the default fluentd forward port
617 # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
618 # to enable logging to fluentd, specify a valid fluentd tag name to be used for the
621 # Module and class name of factory which will be used to provide classes dynamically for other components.
622 factory_module: 'nfvbench.factory'
623 factory_class: 'BasicFactory'
625 # Custom label added for every perf record generated during this run.
626 # Can be overridden by --user-label
630 # THESE FIELDS SHOULD BE USED VERY RARELY
632 # Skip vswitch configuration and retrieving of stats
633 # Can be overridden by --no-vswitch-access
634 # Should be left to the default value (false)
635 no_vswitch_access: false