Initial structure for e2e deployment scripting  69/70569/6
author    Rihab Banday <rihab.banday@ericsson.com>
          Tue, 21 Jul 2020 10:49:45 +0000 (10:49 +0000)
committer Rihab Banday <rihab.banday@ericsson.com>
          Wed, 29 Jul 2020 15:49:40 +0000 (15:49 +0000)
This patch includes the initial skeleton of the e2e deployment
tooling. This is expected to evolve as further development progresses.

Change-Id: Ie1e334e672acb2213e8f6c2174506b685f826f98
Signed-off-by: Rihab Banday <rihab.banday@ericsson.com>
12 files changed:
.gitreview
README [new file with mode: 0644]
create_vm.sh [new file with mode: 0755]
deploy.env [new file with mode: 0644]
deploy.sh [new file with mode: 0755]
functions.sh [new file with mode: 0755]
hw_config/intel/idf.yaml [new file with mode: 0644]
hw_config/intel/pdf.yaml [new file with mode: 0644]
hw_config/intel/setup_network.sh [new file with mode: 0755]
sw_config/bmra/all.yml [new file with mode: 0644]
sw_config/bmra/inventory.ini [new file with mode: 0644]
sw_config/bmra/node1.yml [new file with mode: 0644]
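For orientation, the end-to-end flow these files implement looks roughly like the sketch below (the clone URL is inferred from .gitreview; all values in deploy.env and the PDF/IDF must be adapted to the target lab first):

    git clone https://gerrit.opnfv.org/gerrit/kuberef && cd kuberef
    # Adjust vendor/installer, bridge name, PXE and node addresses for your lab
    vi deploy.env hw_config/intel/pdf.yaml hw_config/intel/idf.yaml
    # Runs clean_up, create_jump, setup_PXE_network, get_vm_ip, copy_files_jump,
    # provision_hosts, setup_network and provision_k8s in sequence
    ./deploy.sh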

diff --git a/.gitreview b/.gitreview
index 500c7ce..57a5a2a 100644 (file)
--- a/.gitreview
+++ b/.gitreview
@@ -1,7 +1,6 @@
 
-        [gerrit]
-        host=gerrit.opnfv.org
-        port=29418
-        project=kuberef
-        defaultbranch=master
-        
\ No newline at end of file
+[gerrit]
+host=gerrit.opnfv.org
+port=29418
+project=kuberef
+defaultbranch=master
diff --git a/README b/README
new file mode 100644 (file)
index 0000000..f9bf1ab
--- /dev/null
+++ b/README
@@ -0,0 +1 @@
+#TODO
diff --git a/create_vm.sh b/create_vm.sh
new file mode 100755 (executable)
index 0000000..39bc38f
--- /dev/null
+++ b/create_vm.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c)
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# TODO: Merge this into the main functions.sh ($VM_NAME and $BRIDGE are expected to be exported; see deploy.env)
+
+sudo mkdir -p "/var/lib/libvirt/images/$1"
+sudo qemu-img create -f qcow2 \
+    -o backing_file=/var/lib/libvirt/images/ubuntu-18.04.qcow2 \
+    "/var/lib/libvirt/images/$1/$1.qcow2" 10G
+
+# Create cloud-init configuration files
+cat <<EOL > user-data
+#cloud-config
+users:
+  - name: ubuntu
+    ssh-authorized-keys:
+      - $(cat $HOME/.ssh/id_rsa.pub)
+    sudo: ['ALL=(ALL) NOPASSWD:ALL']
+    groups: sudo
+    shell: /bin/bash
+EOL
+cat <<EOL > meta-data
+local-hostname: $VM_NAME
+EOL
+
+sudo genisoimage -output "/var/lib/libvirt/images/$1/$1-cidata.iso" \
+    -volid cidata -joliet -rock user-data meta-data
+
+sudo virt-install --connect qemu:///system --name "$VM_NAME" \
+    --ram 4096 --vcpus=4 --os-type linux --os-variant ubuntu16.04 \
+    --disk "path=/var/lib/libvirt/images/$1/$1.qcow2,format=qcow2" \
+    --disk "/var/lib/libvirt/images/$1/$1-cidata.iso,device=cdrom" \
+    --import --network network=default --network "bridge=$BRIDGE,model=rtl8139" --noautoconsole
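A hedged usage sketch for this helper (create_jump in functions.sh passes $VM_NAME as the single argument; the Ubuntu 18.04 backing image and the host bridge from deploy.env are assumed to exist already):

    export VM_NAME=kuberef-jump BRIDGE=pxebr          # values as set in deploy.env
    ls /var/lib/libvirt/images/ubuntu-18.04.qcow2     # backing image must be present
    ./create_vm.sh "$VM_NAME"
    sudo virsh list --all                             # new domain should be running
    sudo virsh domifaddr "$VM_NAME"                   # its address on the default network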
diff --git a/deploy.env b/deploy.env
new file mode 100644 (file)
index 0000000..71a951a
--- /dev/null
+++ b/deploy.env
@@ -0,0 +1,22 @@
+# Define environment variables
+
+export VENDOR=intel
+export INSTALLER=bmra
+
+# Name of the host bridge the jump VM is connected to (used for PXE)
+export BRIDGE=pxebr
+
+# Jump VM details
+export VM_NAME=kuberef-jump
+export USERNAME=ubuntu
+export PROJECT_ROOT="/home/ubuntu"
+
+# Network configuration of the PXE interface in the jump VM
+export PXE_IF=ens4
+export PXE_IF_IP=10.10.190.211
+export PXE_IF_MAC=52:54:00:4a:e8:2d
+export NETMASK=255.255.255.0
+
+# IPs of the provisioned nodes
+export MASTER_IP=10.10.190.202
+export WORKER_IP=10.10.190.203
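Since deploy.sh sources this file verbatim, a quick pre-flight check along these lines (illustrative only, not part of the tooling) can confirm the PXE bridge named above actually exists on the host:

    source deploy.env
    ip link show "$BRIDGE" || echo "bridge $BRIDGE not found - create it before running ./deploy.sh"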
diff --git a/deploy.sh b/deploy.sh
new file mode 100755 (executable)
index 0000000..4ecd6c6
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c)
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o xtrace
+set -o errexit
+set -o nounset
+
+# Script for end-to-end RI-2 deployment using the Infra engine and BMRA.
+# Please refer to README for detailed information.
+
+# Get path information
+DIRECTORY=$(readlink -f "$0")
+CURRENTPATH=$(dirname "$DIRECTORY")
+
+# Source env variables & functions
+source "$CURRENTPATH/deploy.env"
+source "$CURRENTPATH/functions.sh"
+
+# Clean up leftovers
+clean_up
+
+# The next two functions require the PXE network configuration and the IP of
+# the resulting jumphost VM to be known in advance. This IP/MAC information
+# must also be added to the PDF & IDF files (not yet automated by this script).
+# Create jumphost VM & set up the PXE network
+create_jump
+setup_PXE_network
+
+# Get IP of the jumphost VM
+get_vm_ip
+
+# Copy files needed by Infra engine & BMRA in the jumphost VM
+copy_files_jump
+
+# Provision remote hosts
+provision_hosts
+
+# Setup networking (Adapt according to your network setup)
+setup_network
+
+# Provision k8s cluster (currently BMRA)
+provision_k8s
diff --git a/functions.sh b/functions.sh
new file mode 100755 (executable)
index 0000000..edfbb46
--- /dev/null
+++ b/functions.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c)
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Clean up
+
+clean_up() {
+    if sudo virsh list --all | grep "${VM_NAME}.*running" ; then
+        sudo virsh destroy "$VM_NAME"
+    fi
+    if sudo virsh list --all | grep "${VM_NAME}" ; then
+        sudo virsh undefine "$VM_NAME"
+    fi
+    sudo rm -rf "/var/lib/libvirt/images/$VM_NAME"
+    sleep 5
+}
+
+# Create jumphost VM
+
+create_jump() {
+    ./create_vm.sh "$VM_NAME"
+    sleep 30
+}
+
+# Get jumphost VM IP
+
+get_vm_ip() {
+    export VM_IP=$(sudo virsh domifaddr "${VM_NAME}" | \
+            sed 3q | sed '$!d' | awk '{print $4}' | cut -d/ -f1)
+    echo "VM IP is ${VM_IP}"
+}
+
+# Setup PXE network
+
+setup_PXE_network() {
+    get_vm_ip
+    ssh -o StrictHostKeyChecking=no -tT $USERNAME@$VM_IP << EOF
+    sudo ifconfig $PXE_IF up
+    sudo ifconfig $PXE_IF $PXE_IF_IP netmask $NETMASK
+    sudo ifconfig $PXE_IF hw ether $PXE_IF_MAC
+EOF
+}
+
+# Copy files needed by Infra engine & BMRA in the jumphost VM
+
+copy_files_jump() {
+    scp -r -o StrictHostKeyChecking=no $CURRENTPATH/{hw_config/$VENDOR/,sw_config/$INSTALLER/} \
+            $USERNAME@$VM_IP:$PROJECT_ROOT
+}
+
+# Host Provisioning
+
+provision_hosts() {
+# SSH to jumphost
+    ssh -tT $USERNAME@$VM_IP << EOF
+# Install and run cloud-infra
+    if [ ! -d "${PROJECT_ROOT}/engine" ]; then
+      ssh-keygen -t rsa -N "" -f ${PROJECT_ROOT}/.ssh/id_rsa
+      git clone https://gerrit.nordix.org/infra/engine.git
+      cp $PROJECT_ROOT/$VENDOR/{pdf.yaml,idf.yaml} ${PROJECT_ROOT}/engine/engine
+#      sudo mkdir /httpboot && sudo cp -r ${PROJECT_ROOT}/deployment_image.qcow2 /httpboot # to be removed once the CentOS image path is added in infra-engine
+    fi
+      cd ${PROJECT_ROOT}/engine/engine && ./deploy.sh -s ironic -d centos7 \
+       -p file:///${PROJECT_ROOT}/engine/engine/pdf.yaml -i file:///${PROJECT_ROOT}/engine/engine/idf.yaml
+EOF
+}
+
+# Set up networking on the provisioned hosts (adapt setup_network.sh to your network setup)
+
+setup_network() {
+# SSH to jumphost
+    ssh -tT $USERNAME@$VM_IP << EOF
+    ssh -o StrictHostKeyChecking=no root@$MASTER_IP 'bash -s' <  ${PROJECT_ROOT}/${VENDOR}/setup_network.sh
+    ssh -o StrictHostKeyChecking=no root@$WORKER_IP 'bash -s' <  ${PROJECT_ROOT}/${VENDOR}/setup_network.sh
+EOF
+}
+
+# k8s Provisioning (currently BMRA)
+
+provision_k8s() {
+# SSH to jumphost
+    ssh -tT $USERNAME@$VM_IP << EOF
+# Install BMRA
+    if [ ! -d "${PROJECT_ROOT}/container-experience-kits" ]; then
+      curl -fsSL https://get.docker.com/ | sh
+      printf "Waiting for docker service..."
+      until sudo docker info; do
+          printf "."
+          sleep 2
+      done
+      git clone https://github.com/intel/container-experience-kits.git
+      cd ${PROJECT_ROOT}/container-experience-kits
+      git checkout v1.4.1
+      git submodule update --init
+      cp -r examples/group_vars examples/host_vars .
+      cp ${PROJECT_ROOT}/${INSTALLER}/inventory.ini ${PROJECT_ROOT}/container-experience-kits/
+      cp ${PROJECT_ROOT}/${INSTALLER}/all.yml ${PROJECT_ROOT}/container-experience-kits/group_vars/
+      cp ${PROJECT_ROOT}/${INSTALLER}/node1.yml ${PROJECT_ROOT}/container-experience-kits/host_vars/
+    fi
+    sudo service docker start
+    sudo docker run --rm -v ${PROJECT_ROOT}/container-experience-kits:/bmra -v ~/.ssh/:/root/.ssh/ \
+        rihabbanday/bmra-install:centos ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/cluster.yml
+EOF
+}
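For reference, get_vm_ip above parses 'virsh domifaddr' output of roughly this shape (sample values consistent with the jumphost nic2 entry in pdf.yaml):

    sudo virsh domifaddr "$VM_NAME"
    #  Name       MAC address          Protocol     Address
    # -------------------------------------------------------------------------------
    #  vnet0      52:54:00:74:b4:66    ipv4         192.168.122.113/24
    #
    # 'sed 3q' keeps the first three lines, "sed '$!d'" keeps only the last of them,
    # awk prints the Address column and 'cut -d/ -f1' strips the /24, so VM_IP
    # ends up as 192.168.122.113.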
diff --git a/hw_config/intel/idf.yaml b/hw_config/intel/idf.yaml
new file mode 100644 (file)
index 0000000..d9127ff
--- /dev/null
+++ b/hw_config/intel/idf.yaml
@@ -0,0 +1,60 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2020
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+idf:
+  version: 0.1
+  net_config: &net_config
+    oob:
+      interface: 0
+      network: 10.10.190.0
+      gateway: 10.10.190.1
+      vlan: native
+      mask: 24
+      dns:
+        - 8.8.8.8
+    admin:  # admin
+      interface: 1
+      vlan: 191
+      network: 10.10.191.0
+      mask: 24
+      bridge: br-name
+  kubespray: &idf_kubespray
+    nodes_roles:
+      node1: [k8s-cluster, kube-node, kube-master, etcd, vault]
+      node2: [k8s-cluster, kube-node]
+    groups:
+      k8s-cluster:
+        - kube-node
+        - kube-master
+    hostnames:
+      node1: pod19-node1
+      node2: pod19-node2
+    network:
+      # network mapping
+      network_mapping:
+        net_admin: oob
+        # Public network
+        net_public: oob
+        # Management network used by installer components to communicate
+        net_mgmt: oob
+
+engine:
+  pod_name: pod19-jump
+  net_config: *net_config
+
+  # net_config network to be used by the PXE
+  pxe_network: oob
+
+  # net_config network to be used for the internet access
+  public_network: oob
+
+  # interface to be used by the PXE
+  pxe_interface: ens4
+
+  installers:
+    kubespray: *idf_kubespray
diff --git a/hw_config/intel/pdf.yaml b/hw_config/intel/pdf.yaml
new file mode 100644 (file)
index 0000000..c97f29a
--- /dev/null
+++ b/hw_config/intel/pdf.yaml
@@ -0,0 +1,137 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2020
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+### POD descriptor file ###
+
+version: 1.0
+details:
+  pod_owner: Intel
+  contact: Intel
+  lab: Intel Pharos Lab
+  location: somewhere
+  type: baremetal
+  link: http://wiki.opnfv.org/display/pharos/Intel+Hosting
+##############################################################################
+jumphost:
+  name: pod19-jump
+  node: &nodeparas
+    type: baremetal
+    vendor: Intel
+    model: pc
+    arch: x86_64
+    cpus: 80
+    cpu_cflags: broadwell
+    cores: 20
+    memory: 192G
+  disks: &disks
+    - name: 'Intel SSDSC2KB48'
+      disk_capacity: 447G
+      disk_type: hdd
+      disk_interface: scsi
+  os: ubuntu1804
+  remote_params: &remoteparas
+    type:
+      - ipmi
+    user: root
+    pass: root
+  remote_management:
+    <<: *remoteparas
+    address: 10.10.190.10
+    mac_address: "A4:BF:01:00:03:D1"
+  interfaces:
+    - name: 'nic1'
+      address: 10.10.190.211
+      mac_address: "52:54:00:4a:e8:2d"
+      vlan: native
+    - name: 'nic2'
+      address: 192.168.122.113
+      mac_address: "52:54:00:74:b4:66"
+      vlan: native
+    - name: 'nic3'
+      address: 10.10.192.1
+      mac_address: "3c:fd:fe:aa:be:24"
+      vlan: native
+##############################################################################
+nodes:
+  - name: node1
+    node: *nodeparas
+    disks: *disks
+    remote_management:
+      <<: *remoteparas
+      address: 10.10.190.11
+      mac_address: "A4:BF:01:00:06:15"
+    interfaces:
+      - name: 'nic1'
+        speed: 1gb
+        features:
+        address: 10.10.190.202
+        mac_address: "a4:bf:01:4b:55:f4"
+        vlan: native
+      - name: 'nic2'
+        speed: 1gb
+        features:
+        address: 192.168.11.211
+        mac_address: "a4:bf:01:4b:55:f5"
+        vlan: 191
+      - name: 'nic3'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.192.2
+        mac_address: "3c:fd:fe:aa:b8:f4"
+        vlan: native
+      - name: 'nic4'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.193.2
+        mac_address: "3c:fd:fe:aa:b8:f5"
+        vlan: native
+      - name: 'nic5'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.195.2
+        mac_address: "00:00:00:00:00:00"
+        vlan: native
+########################################################################
+  - name: node2
+    node: *nodeparas
+    disks: *disks
+    remote_management:
+      <<: *remoteparas
+      address: 10.10.190.12
+      mac_address: "A4:BF:01:00:09:7B"
+    interfaces:
+      - name: 'nic1'
+        speed: 1gb
+        features:
+        address: 10.10.190.203
+        mac_address: "a4:bf:01:4b:4f:9c"
+        vlan: native
+      - name: 'nic2'
+        speed: 1gb
+        features:
+        address: 10.10.190.203
+        mac_address: "a4:bf:01:4b:4f:9d"
+        vlan: 191
+      - name: 'nic3'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.192.3
+        mac_address: "3c:fd:fe:aa:b7:fc"
+        vlan: native
+      - name: 'nic4'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.193.3
+        mac_address: "3c:fd:fe:aa:b7:fd"
+        vlan: native
+      - name: 'nic5'
+        speed: 10gb
+        features: 'dpdk|sriov'
+        address: 10.10.195.3
+        mac_address: "00:00:00:00:00:00"
+        vlan: 1193
diff --git a/hw_config/intel/setup_network.sh b/hw_config/intel/setup_network.sh
new file mode 100755 (executable)
index 0000000..a6ff6df
--- /dev/null
+++ b/hw_config/intel/setup_network.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Adapt this script according to your network setup
+# TODO Get networking info from the PDF & IDF
+# TODO Add support in the infra engine to update nameserver and other
+# network config files with the correct info
+
+echo nameserver 8.8.8.8 > /etc/resolv.conf
+sed -i 's/NM_CONTROLLED=yes/NM_CONTROLLED=no/g' /etc/sysconfig/network-scripts/ifcfg-eth2
+echo GATEWAY=10.10.190.1 >> /etc/sysconfig/network-scripts/ifcfg-eth2
+ifup eth2
+
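A rough post-run check on the provisioned nodes (IPs taken from deploy.env; the commands are only a suggested verification, not part of the tooling):

    for node in 10.10.190.202 10.10.190.203; do
        ssh -o StrictHostKeyChecking=no root@$node 'cat /etc/resolv.conf; ip route; ping -c1 opnfv.org'
    done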
diff --git a/sw_config/bmra/all.yml b/sw_config/bmra/all.yml
new file mode 100644 (file)
index 0000000..a1140d3
--- /dev/null
+++ b/sw_config/bmra/all.yml
@@ -0,0 +1,96 @@
+---
+## BMRA master playbook variables ##
+
+# Node Feature Discovery
+nfd_enabled: false
+nfd_build_image_locally: false
+nfd_namespace: kube-system
+nfd_sleep_interval: 30s
+
+# Intel CPU Manager for Kubernetes
+cmk_enabled: false
+cmk_namespace: kube-system
+cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
+cmk_hosts_list: node1,node2 # controls which nodes CMK runs on; comment this option out to deploy on all K8s nodes
+cmk_shared_num_cores: 2 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
+cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+#cmk_shared_mode: packed # choose between: packed, spread, default: packed
+#cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
+
+# Intel SRIOV Network Device Plugin
+sriov_net_dp_enabled: false
+sriov_net_dp_namespace: kube-system
+# whether to build and store image locally or use one from public external registry
+sriov_net_dp_build_image_locally: false
+# SR-IOV network device plugin configuration.
+# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
+sriovdp_config_data: |
+    {
+        "resourceList": [{
+                "resourceName": "intel_sriov_netdevice",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed"],
+                    "drivers": ["iavf", "i40evf", "ixgbevf"]
+                }
+            },
+            {
+                "resourceName": "intel_sriov_dpdk",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed"],
+                    "drivers": ["vfio-pci"]
+                }
+            }
+        ]
+    }
+
+
+# Intel Device Plugins for Kubernetes
+qat_dp_enabled: false
+qat_dp_namespace: kube-system
+gpu_dp_enabled: false
+gpu_dp_namespace: kube-system
+
+# Intel Telemetry Aware Scheduling
+tas_enabled: false
+tas_namespace: default
+# create default TAS policy: [true, false]
+tas_create_policy: false
+
+# Create reference net-attach-def objects
+example_net_attach_defs:
+  userspace_ovs_dpdk: false
+  userspace_vpp: false
+  sriov_net_dp: false
+
+## Proxy configuration ##
+#http_proxy: "http://proxy.example.com:1080"
+#https_proxy: "http://proxy.example.com:1080"
+#additional_no_proxy: ".example.com"
+
+#Topology Manager flags
+kubelet_node_custom_flags:
+  - "--feature-gates=TopologyManager=true"
+  - "--topology-manager-policy=none"
+
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+
+## Kubespray variables ##
+
+# default network plugins and kube-proxy configuration
+kube_network_plugin_multus: true
+multus_version: v3.3
+kube_network_plugin: flannel
+kube_pods_subnet: 10.244.0.0/16
+kube_service_addresses: 10.233.0.0/18
+kube_proxy_mode: iptables
+
+# please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
+helm_enabled: true
+
+# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
+registry_enabled: true
+registry_storage_class: ""
+registry_local_address: "localhost:5000"
diff --git a/sw_config/bmra/inventory.ini b/sw_config/bmra/inventory.ini
new file mode 100644 (file)
index 0000000..9fb9f41
--- /dev/null
+++ b/sw_config/bmra/inventory.ini
@@ -0,0 +1,18 @@
+[all]
+master1 ansible_host=10.10.190.202 ip=10.10.190.202
+node1   ansible_host=10.10.190.203 ip=10.10.190.203
+
+[kube-master]
+master1
+
+[etcd]
+master1
+
+[kube-node]
+node1
+
+[k8s-cluster:children]
+kube-master
+kube-node
+
+[calico-rr]
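Before the full cluster.yml run in provision_k8s, connectivity against this inventory can be sanity-checked on the jumphost with something like the following (assumes the rihabbanday/bmra-install:centos image also ships the plain ansible CLI, and that the copy steps in provision_k8s have already staged the files):

    sudo docker run --rm -v ${PROJECT_ROOT}/container-experience-kits:/bmra -v ~/.ssh/:/root/.ssh/ \
        rihabbanday/bmra-install:centos ansible -i /bmra/inventory.ini all -m ping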
diff --git a/sw_config/bmra/node1.yml b/sw_config/bmra/node1.yml
new file mode 100644 (file)
index 0000000..3e4f634
--- /dev/null
+++ b/sw_config/bmra/node1.yml
@@ -0,0 +1,63 @@
+---
+# Kubernetes node configuration
+
+# Enable SR-IOV networking related setup
+sriov_enabled: false
+
+# sriov_nics: SR-IOV PF specific configuration list
+sriov_nics:
+  - name: enp24s0f0              # PF interface names
+    sriov_numvfs: 2              # number of VFs to create for this PF(enp24s0f0)
+    vf_driver: vfio-pci          # VF driver to be attached for all VFs under this PF(enp24s0f0), "i40evf", "iavf", "vfio-pci", "igb_uio"
+    ddp_profile: "gtp.pkgo"      # DDP package name to be loaded into the NIC
+  - name: enp24s0f1
+    sriov_numvfs: 4
+    vf_driver: iavf
+
+sriov_cni_enabled: false
+
+# install DPDK
+install_dpdk: false # DPDK installation is required when sriov_enabled is true; defaults to false
+
+userspace_cni_enabled: false
+
+# Intel Bond CNI Plugin
+bond_cni_enabled: false
+
+vpp_enabled: false
+ovs_dpdk_enabled: false
+# CPU mask for OVS-DPDK PMD threads
+ovs_dpdk_lcore_mask: 0x1
+# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
+# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
+# example 2: "1024" will allocate 1GB from node 0 on a single socket board, e.g. in a VM
+ovs_dpdk_socket_mem: "256,0"
+
+# Set to 'true' to update i40e and i40evf kernel modules
+force_nic_drivers_update: false
+
+# install Intel x700 & x800 series NICs DDP packages
+install_ddp_packages: false
+
+# Enables hugepages support
+hugepages_enabled: false
+
+# Hugepage sizes available: 2M, 1G
+default_hugepage_size: 1G
+
+# Sets how many hugepages of each size should be created
+hugepages_1G: 4
+hugepages_2M: 0
+
+# CPU isolation from Linux scheduler
+isolcpus_enabled: false
+isolcpus: "4-7"
+
+# Intel CommsPowerManagement
+sst_bf_configuration_enabled: false
+# Option sst_bf_mode requires sst_bf_configuration_enabled to be set to 'true'.
+# There are three configuration modes:
+# [s] Set SST-BF config (set min/max to 2700/2700 and 2100/2100)
+# [m] Set P1 on all cores (set min/max to 2300/2300)
+# [r] Revert cores to min/Turbo (set min/max to 800/3900)
+sst_bf_mode: s