#!/bin/bash -e
+# shellcheck disable=SC1090,SC2155
##############################################################################
# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
ERASE_ENV=${ERASE_ENV:-0}
-# shellcheck disable=SC1090
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/globals.sh"
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# Wait for MaaS commissioning/deploy to finish, retry on failure
function maas_fixup() {
local statuscmd="salt 'mas01*' --out yaml state.apply maas.machines.status"
- # shellcheck disable=SC2155
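# Count the machines defined under maas:region:machines (one two-space-indented
# '<name>:' key per machine in the YAML output)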
local ncount=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
grep -cE '^\s{2}\w+:$')
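# Wait until all ${ncount} machines report Deployed/Ready, or until a
# Failed/Allocated status shows up and gets handled by the fixups below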
wait_for 180 "${statuscmd} | tee /dev/stderr | " \
"grep -Eq '((Deployed|Ready): ${ncount}|status: (Failed|Allocated))'"
- # shellcheck disable=SC2155
local statusout=$(eval "${statuscmd}")
- # shellcheck disable=SC2155
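# Extract the system_ids of machines that failed commissioning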
local fcnodes=$(echo "${statusout}" | \
grep -Pzo 'status: Failed commissioning\n\s+system_id: \K.+\n')
for node_system_id in ${fcnodes}; do
  salt -C 'mas01*' state.apply maas.machines.delete \
    pillar="{'system_id': '${node_system_id}'}"
  sleep 10
done
if [ -n "${fcnodes}" ]; then
  # Signal failure so the caller can retry commissioning
  return 1
fi
- # shellcheck disable=SC2155
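# Extract the system_ids of machines that failed deployment or got stuck in 'Allocated'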
local fdnodes=$(echo "${statusout}" | \
grep -Pzo 'status: (Failed deployment|Allocated)\n\s+system_id: \K.+\n')
for node_system_id in ${fdnodes}; do
  salt -C 'mas01*' state.apply maas.machines.mark_broken_fixed \
    pillar="{'system_id': '${node_system_id}'}"
  sleep 10
done
if [ -n "${fdnodes}" ]; then
  salt -C 'mas01*' state.apply maas.machines.deploy
  return 1
fi
return 0
}
# Optionally destroy MaaS machines from a previous run
if [ "${ERASE_ENV}" -gt 1 ]; then
- dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \
- grep -Pzo '\s+system_id: \K.+\n')
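# grep exits non-zero when no machines are listed; relax errexit so an
# empty result does not abort the deploy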
+ set +e; dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \
+ grep -Pzo '\s+system_id: \K.+\n'); set -e
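# Drop stale Ubuntu UEFI boot entries (see cleanup_uefi below) before deleting the old machines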
+ cleanup_uefi
for node_system_id in ${dnodes}; do
salt -C 'mas01*' state.apply maas.machines.delete \
pillar="{'system_id': '${node_system_id}'}"
- sleep 30
+ sleep 10
done
fi
# MaaS rack/region controller, node commissioning
salt -C 'mas01*' state.apply linux,salt,openssh,ntp
- salt -C 'mas01*' state.apply linux.network.interface
salt -C 'mas01*' state.apply maas.pxe_nat
salt -C 'mas01*' state.apply maas.cluster
- salt -C 'cfg01*' state.apply maas.pxe_route
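# maas.region is wrapped in wait_for so transient failures right after cluster setup are retried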
wait_for 10 "salt -C 'mas01*' state.apply maas.region"
maas_db_password: opnfv_secret
dns_server01: ${_param:opnfv_dns_server01}
single_address: ${_param:infra_maas_node01_deploy_address}
+ hwe_kernel: 'hwe-16.04-edge'
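# hwe_kernel is referenced below by default_min_hwe_kernel and by each machine's hwe_kernel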
maas:
region:
- salt_master_ip: ${_param:infra_config_deploy_address}
+ salt_master_ip: ${_param:reclass_config_master}
domain: ${_param:cluster_domain}
maas_config:
commissioning_distro_series: 'xenial'
dnssec_validation: 'no'
enable_third_party_drivers: true
network_discovery: 'enabled'
- default_min_hwe_kernel: 'hwe-16.04'
+ default_min_hwe_kernel: ${_param:hwe_kernel}
subnets:
opnfv_maas_pxe:
name: ${_param:opnfv_infra_maas_pxe_network_address}/24
power_user: ${_param:opnfv_maas_node01_power_user}
architecture: ${_param:opnfv_maas_node01_architecture}
distro_series: xenial
- hwe_kernel: hwe-16.04
+ hwe_kernel: ${_param:hwe_kernel}
kvm02:
interface:
mac: ${_param:opnfv_maas_node02_interface_mac}
power_user: ${_param:opnfv_maas_node02_power_user}
architecture: ${_param:opnfv_maas_node02_architecture}
distro_series: xenial
- hwe_kernel: hwe-16.04
+ hwe_kernel: ${_param:hwe_kernel}
kvm03:
interface:
mac: ${_param:opnfv_maas_node03_interface_mac}
power_user: ${_param:opnfv_maas_node03_power_user}
architecture: ${_param:opnfv_maas_node03_architecture}
distro_series: xenial
- hwe_kernel: hwe-16.04
+ hwe_kernel: ${_param:hwe_kernel}
cmp001:
interface:
mac: ${_param:opnfv_maas_node04_interface_mac}
power_user: ${_param:opnfv_maas_node04_power_user}
architecture: ${_param:opnfv_maas_node04_architecture}
distro_series: xenial
- hwe_kernel: hwe-16.04
+ hwe_kernel: ${_param:hwe_kernel}
cmp002:
interface:
mac: ${_param:opnfv_maas_node05_interface_mac}
power_user: ${_param:opnfv_maas_node05_power_user}
architecture: ${_param:opnfv_maas_node05_architecture}
distro_series: xenial
- hwe_kernel: hwe-16.04
+ hwe_kernel: ${_param:hwe_kernel}
linux:
network:
interface:
function cleanup_uefi {
# Clean up Ubuntu boot entries if cfg01 / kvm nodes are still online from a previous deploy
- # shellcheck disable=SC2086
- ssh ${SSH_OPTS} "${SSH_SALT}" "sudo salt -C 'kvm* or cmp*' cmd.run \
+ local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+ [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
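# When running on cfg01 itself, execute the salt call locally via eval instead of over SSH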
+ ${cmd_str} "sudo salt -C 'kvm* or cmp*' cmd.run \
\"which efibootmgr > /dev/null 2>&1 && \
efibootmgr | grep -oP '(?<=Boot)[0-9]+(?=.*ubuntu)' | \
xargs -I{} efibootmgr --delete-bootnum --bootnum {}; \
# create required networks, including constant "mcpcontrol"
# FIXME(alav): since we renamed "pxe" to "mcpcontrol", we need to make sure
# we delete the old "pxe" virtual network, or it would cause IP conflicts.
- # FIXME(alav): The same applies for "fuel1" virsh network.
- for net in "fuel1" "pxe" "mcpcontrol" "${vnode_networks[@]}"; do
+ for net in "pxe" "mcpcontrol" "${vnode_networks[@]}"; do
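# Tear down and undefine any pre-existing definition before the networks are (re)created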
if virsh net-info "${net}" >/dev/null 2>&1; then
virsh net-destroy "${net}" || true
virsh net-undefine "${net}"
# prepare network args
net_args=" --network network=mcpcontrol,model=virtio"
- if [ "${vnode_data[0]}" = "mas01" ]; then
- # MaaS node's 3rd interface gets connected to PXE/Admin Bridge
+ if [ "${DEPLOY_TYPE:-}" = 'baremetal' ]; then
+ # 3rd interface gets connected to PXE/Admin Bridge (cfg01, mas01)
vnode_networks[2]="${vnode_networks[0]}"
fi
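# The remaining vnode networks (all but the first) are attached in addition to mcpcontrol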
for net in "${vnode_networks[@]:1}"; do