2 # NFVbench default configuration file
4 # This configuration file is ALWAYS loaded by NFVbench and should never be modified by users.
5 # To specify your own property values, always define them in a separate config file
6 # and pass that file to the script using -c or --config <file>
7 # Property values in that config file will override the default values in the current file
10 # IMPORTANT CUSTOMIZATION NOTES
11 # There are roughly 2 types of NFVbench config based on the OpenStack encaps used:
12 # - VLAN (OVS, OVS-DPDK, ML2/VPP)
12 # - VxLAN
13 # Many of the fields to customize are relevant to only 1 of the 2 encaps
14 # These will be clearly labeled "VxLAN only" or "VLAN only"
15 # Fields that are not applicable will not be used by NFVbench and can be left empty
17 # All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise.
18 # Fields that can be overridden at the command line are marked with the corresponding
19 # option, e.g. "--interval"
22 # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
25 # By default (empty) NFVBench will try to locate a VM image file
26 # from the package root directory named "nfvbench-<version>.qcow2" and
27 # upload that file. The image name will be "nfvbench-<version>"
28 # This can be overridden by specifying here a pathname of a file
29 # that follows the same naming convention.
30 # In most cases, this field should be left empty as the packaging should
31 # include the proper VM image file
34 # Name of the flavor to use for the loopback VMs
36 # If the provided name is an exact match to a flavor name known by OpenStack
37 # (as shown from 'nova flavor-list'), that flavor will be reused.
38 # Otherwise, a new flavor will be created with attributes listed below.
39 flavor_type: 'nfvbench.medium'
41 # Custom flavor attributes
43 # Number of vCPUs for the flavor
45 # Memory for the flavor in MB
47 # Size of local disk in GB
49 # metadata are supported and can be added if needed, optional
50 # note that if your openstack does not have NUMA optimization
51 # (cpu pinning and huge pages)
52 # you must comment out extra_specs completely otherwise
53 # loopback VM creation will fail
55 "hw:cpu_policy": dedicated
56 "hw:mem_page_size": large
58 # Name of the availability zone to use for the test VMs
59 # Must be one of the zones listed by 'nova availability-zone-list'
60 # If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
61 # application will use intra-node PVVP flow.
62 # List of compute nodes can be specified, must be in given availability zone if not empty
63 #availability_zone: 'nova'
68 # Credentials for SSH connection to TOR switches.
70 # Leave type empty or switch list empty to skip TOR switches configuration.
71 # Preferably use 'no_tor_access' to achieve the same behavior.
72 # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
73 # to the service chain under test, needed only if configured in access mode)
75 # Switches are only needed if type is not empty.
76 # You can configure 0, 1 or 2 switches
77 # no switch: in this case NFVbench will not attempt to ssh to the switch
78 # and stitching of traffic must be done externally
79 # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
80 # 2 switches: this is the recommended setting with redundant switches, in this case each
81 # traffic generator interface must be wired to a different switch
88 # Skip TOR switch configuration and retrieving of stats
89 # Can be overridden by --no-tor-access
92 # Skip vswitch configuration and retrieving of stats
93 # Can be overridden by --no-vswitch-access
94 no_vswitch_access: false
96 # Type of service chain to run, possible options are PVP, PVVP and EXT
97 # PVP - port to VM to port
98 # PVVP - port to VM to VM to port
99 # EXT - external chain used only for running traffic and checking traffic generator counters,
100 # all other parts of chain must be configured manually
101 # Can be overridden by --service-chain
104 # Total number of service chains, every chain has own traffic stream
105 # Can be overridden by --service-chain-count
106 service_chain_count: 1
108 # Total number of traffic flows for all chains and directions generated by the traffic generator.
109 # Minimum is '2 * service_chain_count', it is automatically adjusted if too small
110 # value was configured. Must be even.
111 # Every flow has packets with different IPs in headers
112 # Can be overridden by --flow-count
115 # Used by PVVP chain to spawn VMs on different compute nodes
116 # Can be overridden by --inter-node
119 # set to true if service chains should use SRIOV
120 # This requires SRIOV to be available on compute nodes
123 # Skip interfaces config on EXT service chain
124 # Can be overridden by --no-int-config
127 # Resources created by NFVbench will not be removed
128 # Can be overridden by --no-cleanup
131 # Configuration for traffic generator
133 # Name of the traffic generator, only for informational purposes
134 host_name: 'nfvbench_tg'
135 # this is the default traffic generator profile to use
136 # the name must be defined under generator_profile
137 # you can override the traffic generator to use using the
138 # -g or --traffic-gen option at the command line
139 default_profile: trex-local
141 # IP addresses for L3 traffic.
142 # All of the IPs are used as base for IP sequence computed based on chain or flow count.
144 # `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
145 # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
146 # `tg_gateway_ip_addrs` base IPs for traffic generator ports, quantity depends on chain count
147 # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
148 # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
149 # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
150 # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
151 # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
152 ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
153 ip_addrs_step: 0.0.0.1
154 tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
155 tg_gateway_ip_addrs_step: 0.0.0.1
156 gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
157 gateway_ip_addrs_step: 0.0.0.1
161 # Traffic Generator Profiles
162 # In case you have multiple testbeds or traffic generators,
163 # you can define one traffic generator profile per testbed/traffic generator.
165 # Generator profiles are listed in the following format:
166 # `name`: Traffic generator profile name (use a unique name, no space or special character)
167 # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
168 # `ip`: IP address of the traffic generator.
169 # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
170 # `interfaces`: Configuration of traffic generator interfaces.
171 # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
172 # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
173 # `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
174 # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
190 # -----------------------------------------------------------------------------
191 # These variables are not likely to be changed
196 # Number of seconds to wait for VMs to pass traffic in both directions
197 check_traffic_time_sec: 200
199 # General retry count
200 generic_retry_count: 100
202 # General poll period
205 # name of the loop VM
206 loop_vm_name: 'nfvbench-loop-vm'
208 # Default names, subnets and CIDRs for PVP/PVVP networks
209 # If a network with given name already exists it will be reused.
210 # - PVP only uses left and right
211 # - PVVP uses left, middle and right
212 # - for EXT chains, this structure is not relevant - refer to external_networks
213 # Otherwise a new internal network will be created with that name, subnet and CIDR.
215 # segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
216 # will be assigned by Neutron.
217 # Must be unique for each network
218 # physical_network can be set to pick a specific physical network - by default (empty) the
219 # default physical network will be picked
220 # In the case of SR-IOV, both physical_network and segmentation ID must be provided
221 # For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet
222 # names under left.physical_network and right.physical_network.
223 # Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
224 # using VLAN ID 2000 and 2001:
227 # segmentation_id: 2000
228 # physical_network: phys_sriov0
230 # segmentation_id: 2001
231 # physical_network: phys_sriov1
235 name: 'nfvbench-net0'
236 subnet: 'nfvbench-subnet0'
237 cidr: '192.168.1.0/24'
242 name: 'nfvbench-net1'
243 subnet: 'nfvbench-subnet1'
244 cidr: '192.168.2.0/24'
249 name: 'nfvbench-net2'
250 subnet: 'nfvbench-subnet2'
251 cidr: '192.168.3.0/24'
256 # EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
258 left: 'nfvbench-net0'
259 right: 'nfvbench-net1'
261 # Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
262 # Leave empty if you do not want the traffic generator to insert the VLAN tag. This is
263 # needed for example if VLAN tagging is enabled on switch (trunk mode) or if you want to hook directly to a NIC
264 # By default is set to true (which is the nominal use case with TOR and trunk mode to Trex)
267 # Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
268 # Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
269 # In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
270 # Example: [1998, 1999]
273 # Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
274 # if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
278 # You can add here more profiles as needed
279 # `l2frame_size` can be specified in any none zero integer value to represent the size in bytes
280 # of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1).
282 - name: traffic_profile_64B
284 - name: traffic_profile_IMIX
285 l2frame_size: ['IMIX']
286 - name: traffic_profile_1518B
287 l2frame_size: ['1518']
288 - name: traffic_profile_3sizes
289 l2frame_size: ['64', 'IMIX', '1518']
291 # Traffic Configuration
292 # bidirectional: to have traffic generated from both direction, set bidirectional to true
293 # profile: must be one of the profiles defined in traffic_profile
294 # The traffic profile can be overridden with the options --frame-size and --uni-dir
297 profile: traffic_profile_64B
299 # Check config and connectivity only - do not generate traffic
300 # Can be overridden by --no-traffic
303 # Do not reset tx/rx counters prior to running
304 # Can be overridden by --no-reset
309 # The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1.
310 unidir_reverse_traffic_pps: 1
312 # The rate specifies if NFVbench should determine the NDR/PDR
313 # or if NFVbench should just generate traffic at a given fixed rate
314 # for a given duration (called "single run" mode)
315 # Supported rate format:
316 # NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default)
317 # Or for single run mode:
318 # Packet per second: pps (e.g. `50pps`)
319 # Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`)
320 # Load percentage: % (e.g. `50%`)
321 # Can be overridden by --rate
324 # Default run duration (single run at given rate only)
325 # Can be overridden by --duration
328 # Interval between intermediate reports when interval reporting is enabled
329 # Can be overridden by --interval
332 # NDR / PDR configuration
334 # Drop rates represent the ratio of dropped packet to the total number of packets sent.
335 # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all
336 # packets sent are dropped (or 1 packet every 10,000 packets sent)
338 # No Drop Rate in percentage; Default to 0.001%
340 # Partial Drop Rate in percentage; NDR should always be less than PDR
342 # The accuracy of NDR and PDR as a percentage of line rate; The exact NDR
343 # or PDR should be within `load_epsilon` line rate % from the one calculated.
344 # For example, with a value 0.1, and a line rate of 10Gbps, the accuracy
345 # of NDR and PDR will be within 0.1% of 10Gbps or 10Mbps.
346 # The lower the value the more iterations and the longer it will take to find the NDR/PDR.
347 # In practice, due to the precision of the traffic generator it is not recommended to
348 # set it to lower than 0.1
351 # Location where to store results in a JSON format. Must be container specific path.
352 # Can be overridden by --json
355 # Location where to store results in the NFVbench standard JSON format:
356 # <service-chain-type>-<service-chain-count>-<flow-count>-<packet-sizes>.json
357 # Example: PVP-1-10-64-IMIX.json
358 # Must be container specific path.
359 # Can be overridden by --std-json
362 # Prints debug messages (verbose mode)
363 # Can be overridden by --debug
366 # Set to a valid path name if logging to file is to be enabled
367 # Defaults to disabled
370 # When enabled, all logs will be sent to a fluentd server at the requested IP and port
371 # The fluentd "tag" and "label" fields for every message will be set to "nfvbench"
373 # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd
374 # to enable logging to fluentd, specify a valid fluentd tag name to be used for the
378 # IP address of the server, defaults to loopback
381 # port # to use, by default, use the default fluentd forward port
384 # Module and class name of factory which will be used to provide classes dynamically for other components.
385 factory_module: 'nfvbench.factory'
386 factory_class: 'BasicFactory'
388 # Custom label added for every perf record generated during this run.
389 # Can be overridden by --user-label