# The only global parameter at this time is ha_enabled, which will use
# the tripleo ha architecture described here:
# https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md
# with 3 controllers by default.
# If ha_enabled is false, there will only be one controller.
# introspect defaults to True.
# Enables/disables the introspection process at deploy time.
# ipxe defaults to True.
# Enables/disables the use of ipxe for provisioning.
# Whether or not to use containers for the overcloud services
# Which SDN controller to use. Valid options are 'opendaylight', 'onos',
# 'opendaylight-external', 'opencontrail' or false. A value of false will
# use Neutron's OVS ML2 controller.
sdn_controller: opendaylight
# Which version of ODL to use. This is only valid if 'opendaylight' was used
# above. Valid options are 'nitrogen', 'oxygen'. If no value
# is specified, nitrogen will be used.
# odl_version: nitrogen
# Whether to install and configure Tacker (VNF Manager)
# Whether to configure Congress (policy as a service) datasources
# Note: Congress is already installed by default
# Whether to configure ODL or ONOS with Service Function Chaining support.
# This requires the opnfv-apex-opendaylight-sfc package to be installed,
# since it uses a different overcloud image.
# Whether to configure ODL with SDNVPN support.
# Which dataplane to use for overcloud tenant networks. Valid options are
# 'ovs', 'ovs_dpdk' and 'fdio'.
# Whether to run the kvm real time kernel (rt_kvm) in the compute node(s) to
# reduce the network latencies caused by network function virtualization
# Whether to install and configure fdio functionality in the overcloud.
# The dataplane should be specified as fdio if this is set to true.
# Whether to install and configure SRIOV service in the compute node(s) to
# allow VMs to use VFs/PFs. The user must know in advance the name of the
# SRIOV capable NIC that will be configured.
# Whether to run vsperf after the install has completed
# Specify a device for ceph to use for the OSDs. By default a virtual disk
# is created for the OSDs. This setting allows you to specify a different
# target for the OSDs. The setting must be valid on all overcloud nodes.
# The controllers and the compute nodes all have OSDs setup on them and
# therefore this device name must be valid on all overcloud nodes.
# ceph_device: /dev/sdb
# Set performance options on specific roles. The valid roles are 'Compute',
# 'Controller' and 'Storage', and the valid sections are 'kernel' and 'nova'
# # In this example, these three settings will be passed to the kernel
# # boot line. Any key/value pair can be entered here, so care should
# # be taken to ensure that machines do not fail to boot.
# # isolcpus is generally used to push host processes off a particular
# # core, so that it can be dedicated to a specific process. On control
# # nodes this could be an ovs_dpdk process.
# # Hugepages are required for ovs_dpdk support.
# # intel_iommu is also required for ovs_dpdk support.
# # This is currently the only available option in the nova section. It
# # will add the provided string to vcpu_pin_set in nova.conf. This is
# # used to pin guest VMs to a set of CPU cores, and is described in
# # http://docs.openstack.org/ocata/config-reference/compute/config-options.html
# # On compute nodes, isolcpus is usually used to reserve cores for use
# # either by VMs or ovs_dpdk
# Set yardstick option to install yardstick
# Set dovetail option to install dovetail
# Whether the nodes are deployed as openstack, kubernetes or openshift nodes.
# Defaults to openstack.
# Possible values are openstack, k8s, openshift