[kuberef.git] / functions.sh
#!/bin/bash

# SPDX-FileCopyrightText: 2021 Ericsson AB and others
#
# SPDX-License-Identifier: Apache-2.0

OS_ID=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')

info() {
    _print_msg "INFO" "$1"
}

error() {
    _print_msg "ERROR" "$1"
    exit 1
}

_print_msg() {
    echo "$(date +%H:%M:%S) - $1: $2"
}

assert_non_empty() {
    if [ -z "$1" ]; then
        error "$2"
    fi
}

if [ "${DEBUG:-false}" == "true" ]; then
    set -o xtrace
fi

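# Illustrative usage of the logging/assertion helpers above (a hedged sketch;
# these example calls are not executed by this script and the messages are made up):
#   info "Checking prerequisites"
#   assert_non_empty "$DEPLOYMENT" "DEPLOYMENT must be set"
#   error "Unsupported configuration"   # prints the message and exits with status 1
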
check_prerequisites() {
    info "Check prerequisites"

    #-------------------------------------------------------------------------------
    # Check for DEPLOYMENT type
    #-------------------------------------------------------------------------------
    if ! [[ "$DEPLOYMENT" =~ ^(full|k8s)$ ]]; then
        error "Unsupported value for DEPLOYMENT ($DEPLOYMENT)"
    fi

    #-------------------------------------------------------------------------------
    # We shouldn't be running as root
    #-------------------------------------------------------------------------------
    if [[ "$(whoami)" == "root" ]] && [[ "$DEPLOYMENT" != "k8s" ]]; then
        error "This script must not be run as root! Please switch to a regular user before running the script."
    fi

    #-------------------------------------------------------------------------------
    # Check for passwordless sudo
    #-------------------------------------------------------------------------------
    if ! sudo -n "true"; then
        error "Passwordless sudo is needed for the '$(id -nu)' user."
    fi

    #-------------------------------------------------------------------------------
    # Check if SSH key exists
    #-------------------------------------------------------------------------------
    if [[ ! -f "$HOME/.ssh/id_rsa" ]]; then
        error "You must have an SSH keypair in order to run this script!"
    fi

    #-------------------------------------------------------------------------------
    # We are using sudo, so we need to make sure that env_reset is not present
    #-------------------------------------------------------------------------------
    sudo sed -i "s/^Defaults.*env_reset/#&/" /etc/sudoers

    #-------------------------------------------------------------------------------
    # Installing prerequisites
    #-------------------------------------------------------------------------------
    if [ "$OS_ID" == "ubuntu" ]; then

      sudo apt update -y
      ansible --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo apt-add-repository --yes --update ppa:ansible/ansible
        sudo apt-get install -y ansible
      fi

      yq --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O /usr/bin/yq
        sudo chmod +x /usr/bin/yq
      fi

      virsh --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        # virsh is shipped in the libvirt client package on Ubuntu
        sudo apt-get install -y libvirt-clients
      fi

      jq --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo apt-get install -y jq
      fi

      virtualenv --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo apt-get install -y virtualenv
      fi

      pip --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        # pip is provided by the python3-pip package
        sudo apt-get install -y python3-pip
      fi

    elif [ "$OS_ID" == "centos" ]; then

      sudo yum update -y
      ansible --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo dnf install -y epel-release
        sudo dnf install -y ansible
      fi

      yq --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O /usr/bin/yq
        sudo chmod +x /usr/bin/yq
      fi

      virsh --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        # virsh is shipped in the libvirt client package on CentOS
        sudo yum install -y libvirt-client
      fi

      jq --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo yum install -y jq
      fi

      virtualenv --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        sudo yum install -y virtualenv
      fi

      pip --version
      RESULT=$?
      if [ $RESULT -ne 0 ]; then
        # pip is provided by the python3-pip package
        sudo yum install -y python3-pip
      fi
    fi

    #-------------------------------------------------------------------------------
    # Check if necessary tools are installed
    #-------------------------------------------------------------------------------
    for tool in ansible yq virsh jq docker virtualenv pip; do
        if ! command -v "$tool" &> /dev/null; then
            error "$tool not found. Please install."
        fi
    done

    #-------------------------------------------------------------------------------
    # Check if user belongs to libvirt's group
    #-------------------------------------------------------------------------------
    libvirt_group="libvirt"
    # shellcheck disable=SC1091
    source /etc/os-release || source /usr/lib/os-release
    if [ "${ID,,}" == "ubuntu" ] && [ "$VERSION_ID" == "16.04" ]; then
        libvirt_group+="d"
    fi
    if ! groups | grep "$libvirt_group"; then
        error "$(id -nu) user doesn't belong to $libvirt_group group."
    fi
}

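# A hedged sketch of the environment check_prerequisites expects from the caller
# (variable names are taken from the checks above; the values shown are only
# examples):
#   export DEPLOYMENT=full        # or "k8s"
#   export DEBUG=false            # "true" enables xtrace
# The calling user also needs passwordless sudo, an ~/.ssh/id_rsa keypair and
# membership in the libvirt group (libvirtd on Ubuntu 16.04).
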
# Get jumphost VM PXE IP
get_host_pxe_ip() {
    local PXE_NETWORK
    local PXE_IF_INDEX
    local PXE_IF_IP

    host=$1
    assert_non_empty "$host" "get_host_pxe_ip - host parameter not provided"

    PXE_NETWORK=$(yq r "$CURRENTPATH"/hw_config/"$VENDOR"/idf.yaml engine.pxe_network)
    assert_non_empty "$PXE_NETWORK" "PXE network for jump VM not defined in IDF."

    PXE_IF_INDEX=$(yq r "$CURRENTPATH"/hw_config/"${VENDOR}"/idf.yaml idf.net_config."$PXE_NETWORK".interface)
    assert_non_empty "$PXE_IF_INDEX" "Index of PXE interface not found in IDF."

    PXE_IF_IP=$(yq r "$CURRENTPATH"/hw_config/"${VENDOR}"/pdf.yaml "$host".interfaces["$PXE_IF_INDEX"].address)
    assert_non_empty "$PXE_IF_IP" "IP of PXE interface not found in PDF."

    echo "$PXE_IF_IP"
}

# Get public MAC for VM
get_host_pub_mac() {
    local PUB_NETWORK
    local PUB_IF_INDEX
    local PUB_IF_MAC

    host=$1
    assert_non_empty "$host" "get_host_pub_mac - host parameter not provided"

    PUB_NETWORK=$(yq r "$CURRENTPATH"/hw_config/"$VENDOR"/idf.yaml engine.public_network)
    assert_non_empty "$PUB_NETWORK" "Public network for jump VM not defined in IDF."

    PUB_IF_INDEX=$(yq r "$CURRENTPATH"/hw_config/"${VENDOR}"/idf.yaml idf.net_config."$PUB_NETWORK".interface)
    assert_non_empty "$PUB_IF_INDEX" "Index of public interface not found in IDF."

    PUB_IF_MAC=$(yq r "$CURRENTPATH"/hw_config/"${VENDOR}"/pdf.yaml "$host".interfaces["$PUB_IF_INDEX"].mac_address)
    assert_non_empty "$PUB_IF_MAC" "MAC of public interface not found in PDF."
    echo "$PUB_IF_MAC"
}

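# A hedged sketch of the idf.yaml/pdf.yaml structure that the two getters above
# query with yq (field names come from the yq paths; the network names, index
# and address values below are made-up examples):
#
#   # hw_config/<vendor>/idf.yaml
#   engine:
#     pxe_network: admin
#     public_network: public
#   idf:
#     net_config:
#       admin:
#         interface: 0
#
#   # hw_config/<vendor>/pdf.yaml
#   jumphost:
#     name: kuberef-jump            # example value, read by get_vm_ip
#     interfaces:
#       - address: 10.10.190.211    # example value
#         mac_address: "52:54:00:4a:e8:2d"
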
# Get jumphost VM IP
get_vm_ip() {
    if [[ "$DEPLOYMENT" == "full" ]]; then
        ip=$(get_host_pxe_ip "jumphost")
    else
        mac=$(get_host_pub_mac "jumphost")
        JUMPHOST_NAME=$(yq r "$CURRENTPATH"/hw_config/"$VENDOR"/pdf.yaml jumphost.name)
        ipblock=$(virsh domifaddr "$JUMPHOST_NAME" --full | grep "$mac" | awk '{print $4}' | tail -n 1)
        assert_non_empty "$ipblock" "IP subnet for VM not available."
        ip="${ipblock%/*}"
    fi
    echo "$ip"
}

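# Illustrative use of the getters (a hedged sketch mirroring how copy_files_jump
# and copy_k8s_config below consume them):
#   vm_ip="$(get_vm_ip)"
#   master_ip="$(get_host_pxe_ip "nodes[0]")"
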
# Copy files needed by Infra engine & BMRA to the jumphost VM
copy_files_jump() {
    vm_ip="$(get_vm_ip)"
    docker_config="/opt/kuberef/docker_config"
    scp -r -o StrictHostKeyChecking=no \
    "$CURRENTPATH"/{hw_config/"$VENDOR"/,sw_config/"$INSTALLER"/} \
    "$USERNAME@${vm_ip}:$PROJECT_ROOT"
    if [[ "$DEPLOYMENT" != "full" ]]; then
        scp -r -o StrictHostKeyChecking=no \
        ~/.ssh/id_rsa \
        "$USERNAME@${vm_ip}:.ssh/id_rsa"
    fi
    if [ -f "$docker_config" ]; then
        scp -r -o StrictHostKeyChecking=no \
        "$docker_config" "$USERNAME@${vm_ip}:$PROJECT_ROOT"
    fi
}

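# The copy and provisioning helpers in this file rely on variables exported by
# the calling script; a hedged summary based only on the references above and
# below: CURRENTPATH, VENDOR, INSTALLER, USERNAME, PROJECT_ROOT, DEPLOYMENT,
# DISTRO and BMRA_PROFILE.
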
# Host Provisioning
provision_hosts_baremetal() {
    CMD="./deploy.sh -s ironic -d ${DISTRO} -p file:///${PROJECT_ROOT}/engine/engine/pdf.yaml -i file:///${PROJECT_ROOT}/engine/engine/idf.yaml"
    if [ "${DEBUG:-false}" == "true" ]; then
        CMD+=" -v"
    fi

    # shellcheck disable=SC2087
    ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
# Install and run cloud-infra
if [ ! -d "${PROJECT_ROOT}/engine" ]; then
    ssh-keygen -t rsa -N "" -f "${PROJECT_ROOT}"/.ssh/id_rsa
    git clone https://gerrit.nordix.org/infra/engine.git
fi
cp "${PROJECT_ROOT}"/"${VENDOR}"/{pdf.yaml,idf.yaml} \
"${PROJECT_ROOT}"/engine/engine
cd "${PROJECT_ROOT}"/engine/engine || return
${CMD}
EOF
}

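# For reference, the engine invocation assembled above expands on the jumphost
# to something like the following (a hedged illustration with placeholders, not
# an extra command run by this script):
#   ./deploy.sh -s ironic -d <DISTRO> \
#       -p file:///<PROJECT_ROOT>/engine/engine/pdf.yaml \
#       -i file:///<PROJECT_ROOT>/engine/engine/idf.yaml [-v]
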
provision_hosts_vms() {
    # shellcheck disable=SC2087
    # Install and run cloud-infra
    if [ ! -d "${CURRENTPATH}/engine" ]; then
        git clone https://gerrit.nordix.org/infra/engine.git "${CURRENTPATH}"/engine
    fi
    cp "${CURRENTPATH}"/hw_config/"${VENDOR}"/{pdf.yaml,idf.yaml} "${CURRENTPATH}"/engine/engine
    cd "${CURRENTPATH}"/engine/engine || return
    CMD="./deploy.sh -s ironic -p file:///${CURRENTPATH}/engine/engine/pdf.yaml -i file:///${CURRENTPATH}/engine/engine/idf.yaml"
    if [ "${DEBUG:-false}" == "true" ]; then
        CMD+=" -v"
    fi

    ${CMD}
}

# Setup networking on provisioned hosts (adapt setup_network.sh according to your network setup)
setup_network() {
    # Set the upper limit on the number of nodes in the RI2 cluster (starting from 0)
    NODE_MAX_ID=$(($(yq r "$CURRENTPATH"/hw_config/"$VENDOR"/idf.yaml --length idf.kubespray.hostnames)-1))

    for idx in $(seq 0 "$NODE_MAX_ID"); do
        NODE_IP=$(get_host_pxe_ip "nodes[${idx}]")
        # SSH to jumphost
        # shellcheck disable=SC2087
        ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
ssh -o StrictHostKeyChecking=no root@"${NODE_IP}" \
    'bash -s' < "${PROJECT_ROOT}"/"${VENDOR}"/setup_network.sh
EOF
    done
}

# k8s Provisioning (currently BMRA)
provision_k8s_baremetal() {
    ansible_cmd="/bin/bash -c '"
    if [[ "$DEPLOYMENT" == "k8s" ]]; then
        ansible-playbook -i "$CURRENTPATH"/sw_config/bmra/inventory.ini "$CURRENTPATH"/playbooks/pre-install.yaml
        ansible_cmd+="yum -y remove python-netaddr; ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/k8s/patch_kubespray.yml;"
    fi
    ansible_cmd+="ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/${BMRA_PROFILE}.yml'"

    # shellcheck disable=SC2087
    ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
# Install BMRA
if ! command -v docker; then
    curl -fsSL https://get.docker.com/ | sh
    printf "Waiting for docker service..."
    until sudo docker info; do
        printf "."
        sleep 2
    done
fi
if [ ! -d "${PROJECT_ROOT}/container-experience-kits" ]; then
    git clone --recurse-submodules --depth 1 https://github.com/intel/container-experience-kits.git -b v21.08 "${PROJECT_ROOT}"/container-experience-kits/
    cp -r "${PROJECT_ROOT}"/container-experience-kits/examples/"${BMRA_PROFILE}"/group_vars "${PROJECT_ROOT}"/container-experience-kits/
fi
if [ -f "${PROJECT_ROOT}/docker_config" ]; then
    cp "${PROJECT_ROOT}"/docker_config \
        "${PROJECT_ROOT}"/"${INSTALLER}"/dockerhub_credentials/vars/main.yml
    cp -r "${PROJECT_ROOT}"/"${INSTALLER}"/dockerhub_credentials \
        "${PROJECT_ROOT}"/container-experience-kits/roles/
    cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_k8s.yml \
        "${PROJECT_ROOT}"/container-experience-kits/playbooks/k8s/k8s.yml
fi
cp "${PROJECT_ROOT}"/"${INSTALLER}"/{inventory.ini,ansible.cfg} \
    "${PROJECT_ROOT}"/container-experience-kits/
cp "${PROJECT_ROOT}"/"${INSTALLER}"/{all.yml,kube-node.yml} \
    "${PROJECT_ROOT}"/container-experience-kits/group_vars/
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_cmk_build.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/cmk_install/tasks/main.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_vfio.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/sriov_nic_init/tasks/bind_vf_driver.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_rhel_packages.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/bootstrap/install_packages/tasks/rhel.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_packages.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/bootstrap/install_packages/tasks/main.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_kubespray_requirements.txt \
    "${PROJECT_ROOT}"/container-experience-kits/playbooks/k8s/kubespray/requirements.txt
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_preflight.yml \
    "${PROJECT_ROOT}"/container-experience-kits/playbooks/preflight.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_sriov_cni_install.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/sriov_cni_install/tasks/main.yml
cp "${PROJECT_ROOT}"/"${INSTALLER}"/patched_install_dpdk_meson.yml \
    "${PROJECT_ROOT}"/container-experience-kits/roles/install_dpdk/tasks/install_dpdk_meson.yml

sudo docker run --rm \
-e ANSIBLE_CONFIG=/bmra/ansible.cfg \
-e PROFILE="${BMRA_PROFILE}" \
-v "${PROJECT_ROOT}"/container-experience-kits:/bmra \
-v ~/.ssh/:/root/.ssh/ rihabbanday/bmra21.08-install:centos \
${ansible_cmd}
EOF
}

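# For reference, the ansible_cmd assembled above ends up as one of the following
# inside the BMRA container (a hedged illustration of the string concatenation,
# not an additional command that this script runs):
#   DEPLOYMENT=full: /bin/bash -c 'ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/<BMRA_PROFILE>.yml'
#   DEPLOYMENT=k8s:  /bin/bash -c 'yum -y remove python-netaddr;
#                      ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/k8s/patch_kubespray.yml;
#                      ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/<BMRA_PROFILE>.yml'
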
provision_k8s_vms() {
    # shellcheck disable=SC2087
    # Install BMRA
    if [ ! -d "${CURRENTPATH}/container-experience-kits" ]; then
        git clone --recurse-submodules --depth 1 https://github.com/intel/container-experience-kits.git -b v21.08 "${CURRENTPATH}"/container-experience-kits/
        cp -r "${CURRENTPATH}"/container-experience-kits/examples/"${BMRA_PROFILE}"/group_vars "${CURRENTPATH}"/container-experience-kits/
    fi
    cp "${CURRENTPATH}"/sw_config/bmra/{inventory.ini,ansible.cfg} \
        "${CURRENTPATH}"/container-experience-kits/
    cp "${CURRENTPATH}"/sw_config/bmra/{all.yml,kube-node.yml} \
        "${CURRENTPATH}"/container-experience-kits/group_vars/
    cp "${CURRENTPATH}"/sw_config/bmra/patched_cmk_build.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/cmk_install/tasks/main.yml
    cp "${CURRENTPATH}"/sw_config/bmra/patched_vfio.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/sriov_nic_init/tasks/bind_vf_driver.yml
    cp "${CURRENTPATH}"/sw_config/bmra/patched_rhel_packages.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/bootstrap/install_packages/tasks/rhel.yml
    cp "${CURRENTPATH}"/sw_config/bmra/patched_packages.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/bootstrap/install_packages/tasks/main.yml
    cp "${CURRENTPATH}"/sw_config/"${INSTALLER}"/patched_kubespray_requirements.txt \
        "${CURRENTPATH}"/container-experience-kits/playbooks/k8s/kubespray/requirements.txt
    cp "${CURRENTPATH}"/sw_config/"${INSTALLER}"/patched_preflight.yml \
        "${CURRENTPATH}"/container-experience-kits/playbooks/preflight.yml
    cp "${CURRENTPATH}"/sw_config/"${INSTALLER}"/patched_sriov_cni_install.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/sriov_cni_install/tasks/main.yml
    cp "${CURRENTPATH}"/sw_config/"${INSTALLER}"/patched_install_dpdk_meson.yml \
        "${CURRENTPATH}"/container-experience-kits/roles/install_dpdk/tasks/install_dpdk_meson.yml

    ansible-playbook -i "$CURRENTPATH"/sw_config/bmra/inventory.ini "$CURRENTPATH"/playbooks/pre-install.yaml

    # Ansible upgrade below can be removed once image is updated
    sudo docker run --rm \
    -e ANSIBLE_CONFIG=/bmra/ansible.cfg \
    -e PROFILE="${BMRA_PROFILE}" \
    -v "${CURRENTPATH}"/container-experience-kits:/bmra \
    -v ~/.ssh/:/root/.ssh/ rihabbanday/bmra21.08-install:centos \
    ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/"${BMRA_PROFILE}".yml
}

# Copy kubeconfig to the appropriate location needed by functest containers
copy_k8s_config() {
    # TODO Use Kubespray variables in BMRA to simplify this
    MASTER_IP=$(get_host_pxe_ip "nodes[0]")
    # shellcheck disable=SC2087
    ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
scp -o StrictHostKeyChecking=no -q root@"$MASTER_IP":/root/.kube/config "${PROJECT_ROOT}"/kubeconfig
sed -i 's/127.0.0.1/$MASTER_IP/g' "${PROJECT_ROOT}"/kubeconfig
EOF

    # Copy kubeconfig from the jump VM to the appropriate location on the jump host.
    # A direct scp to the target location doesn't work due to permissions/SSH keys.
    scp -o StrictHostKeyChecking=no "$USERNAME"@"$(get_vm_ip)":"${PROJECT_ROOT}"/kubeconfig kubeconfig
    if [ -d "/home/opnfv/functest-kubernetes" ]; then
        sudo cp kubeconfig /home/opnfv/functest-kubernetes/config
    fi
}

# Creates a Python virtual environment
creates_virtualenv() {
    if [ ! -d "$CURRENTPATH/.venv" ]; then
        virtualenv "$CURRENTPATH/.venv"
    fi
    # shellcheck disable=SC1090
    source "$CURRENTPATH/.venv/bin/activate"
    pip install -r "$CURRENTPATH/requirements.txt"
}

# Executes a specific Ansible playbook
run_playbook() {
    ansible_cmd="$(command -v ansible-playbook) -i $CURRENTPATH/inventory/localhost.ini -e ansible_python_interpreter=$(command -v python)"
    if [ "${DEBUG:-false}" == "true" ]; then
        ansible_cmd+=" -vvv"
    fi
    eval "$ansible_cmd $CURRENTPATH/playbooks/${1}.yaml"
}
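
# Hedged usage sketch of the two helpers above: run_playbook simply appends
# "<playbook-name>.yaml" under playbooks/, and calling creates_virtualenv first
# is an assumption based on run_playbook resolving ansible-playbook and python
# from the active environment:
#   creates_virtualenv
#   run_playbook <playbook-name>    # executes playbooks/<playbook-name>.yaml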