2 SPDX-FileCopyrightText: 2021 Intel Corporation
4 SPDX-License-Identifier: Apache-2.0
8 ## BMRA primary playbook variables ##
12 #kube_version: v1.20.4
14 #kube_version: v1.18.16
16 # Run system-wide package update (apt dist-upgrade, yum update, ...)
17 # Note: enabling this may lead to unexpected results
18 # Tip: you can set this per host using host_vars
19 update_all_packages: false
22 # Node Feature Discovery
23 nfd_enabled: {{ bmra.features.nfd }}
24 nfd_build_image_locally: false
25 nfd_namespace: kube-system
26 nfd_sleep_interval: 60s
28 # Intel CPU Manager for Kubernetes (CMK)
29 cmk_enabled: {{ bmra.features.cmk.enable }}
30 cmk_namespace: kube-system
31 cmk_use_all_hosts: false # 'true' will deploy CMK on the controller nodes too
32 #cmk_hosts_list: node1,node2 # allows you to control which nodes CMK will run on, leave this option commented out to deploy on all K8s nodes
33 cmk_shared_num_cores: {{ bmra.features.cmk.num_shared_cores }} # number of CPU cores to be assigned to the "shared" pool on each of the nodes
34 cmk_exclusive_num_cores: {{ bmra.features.cmk.num_exclusive_cores }} # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
35 # cmk_shared_mode: packed # choose between: packed, spread, default: packed
36 # cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
37 autogenerate_isolcpus: {{ bmra.features.isolcpus.autogenerate }}
39 # Native CPU Manager (Kubernetes built-in)
40 # Note: Enabling CMK and built-in Native CPU Manager is NOT recommended.
41 # Setting this option to "true" enables the "static" policy, otherwise the default "none" policy is used.
42 # The reserved CPU cores settings are individual per each worker node, and therefore are available to configure in the host_vars file
43 native_cpu_manager_enabled: false
45 # Enable Kubernetes built-in Topology Manager
46 topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
47 # There are four supported policies: none, best-effort, restricted, single-numa-node.
48 topology_manager_policy: "{{ bmra.features.topology_manager.policy }}"
50 # Intel SRIOV Network Device Plugin
51 sriov_net_dp_enabled: {{ bmra.features.sriov_net_dp }}
52 sriov_net_dp_namespace: kube-system
53 # whether to build and store image locally or use one from public external registry
54 sriov_net_dp_build_image_locally: true
55 # SR-IOV network device plugin configuration.
56 # For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
57 sriovdp_config_data: |
60 "resourceName": "intel_sriov_netdevice",
63 "devices": ["154c", "10ed", "1889"],
64 "drivers": ["iavf", "ixgbevf"]
68 "resourceName": "intel_sriov_dpdk_700_series",
71 "devices": ["154c", "10ed"],
72 "drivers": ["vfio-pci"]
76 "resourceName": "intel_sriov_dpdk_800_series",
80 "drivers": ["vfio-pci"]
84 "resourceName": "intel_fpga",
85 "deviceType": "accelerator",
94 # Intel QAT Device Plugin for Kubernetes
95 qat_dp_enabled: {{ bmra.features.qat.enable }}
96 qat_dp_namespace: kube-system
97 qat_dp_build_image_locally: true
99 # Intel GPU Device Plugin for Kubernetes
100 gpu_dp_enabled: false
101 gpu_dp_namespace: kube-system
102 gpu_dp_build_image_locally: true
104 # Intel SGX Device Plugin for Kubernetes
105 sgx_dp_enabled: false
106 sgx_dp_build_image_locally: true
107 # ProvisionLimit is the maximum number of containers that can share
108 # the same SGX provision device.
109 sgx_dp_provision_limit: 20
110 # EnclaveLimit is the maximum number of containers that can share
111 # the same SGX enclave device.
112 sgx_dp_enclave_limit: 20
114 # Intel Telemetry Aware Scheduling
115 tas_enabled: {{ bmra.features.tas.enable }}
116 tas_namespace: monitoring
117 # create and enable TAS demonstration policy: [true, false]
118 tas_enable_demo_policy: {{ bmra.features.tas.demo_policy }}
120 # Create reference net-attach-def objects
121 example_net_attach_defs:
122 userspace_ovs_dpdk: false # Update to match host_vars CNI configuration
123 userspace_vpp: false # Update to match host_vars CNI configuration
124 sriov_net_dp: {{ bmra.features.sriov_net_dp }} # Update to match host_vars CNI configuration
126 ## Proxy configuration ##
127 #http_proxy: "http://proxy.example.com:1080"
128 #https_proxy: "http://proxy.example.com:1080"
129 #additional_no_proxy: ".example.com,mirror_ip"
131 # (Ubuntu only) disables DNS stub listener which may cause issues on Ubuntu
132 dns_disable_stub_listener: false
134 # Kubernetes cluster name, also will be used as DNS domain
135 cluster_name: cluster.local
137 ## Kubespray variables ##
139 # default network plugins and kube-proxy configuration
140 kube_network_plugin_multus: true
141 multus_version: v3.4.2
142 kube_network_plugin: flannel
143 kube_pods_subnet: 10.244.0.0/16
144 kube_service_addresses: 10.233.0.0/18
145 kube_proxy_mode: iptables
147 # comment this line out if you want to expose k8s services of type nodePort externally.
148 kube_proxy_nodeport_addresses_cidr: 127.0.0.0/8
150 # please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
153 # local Docker Hub mirror, if it exists
154 #docker_registry_mirrors:
155 # - http://mirror_ip:mirror_port
157 # Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
158 registry_local_address: "localhost:30500"
160 # Enable Pod Security Policy. This option enables PSP admission controller and creates minimal set of rules.
161 psp_enabled: {{ bmra.features.psp }}
163 # Set image pull policy to Always. Pulls images prior to starting containers. Valid credentials must be configured.
164 always_pull_enabled: true
166 # Telemetry configuration
167 collectd_scrap_interval: 30