diff --git a/ci/deploy.sh b/ci/deploy.sh
index 1abc3c49..4f123e10 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,4 +1,12 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 
 # Deploy script to install provisioning server for OPNFV Apex
 # author: Dan Radez (dradez@redhat.com)
@@ -9,34 +17,55 @@ set -e
 
 ##VARIABLES
-if [ "$TERM" != "unknown" ]; then
-    reset=$(tput sgr0)
-    blue=$(tput setaf 4)
-    red=$(tput setaf 1)
-    green=$(tput setaf 2)
-else
-    reset=""
-    blue=""
-    red=""
-    green=""
-fi
+# tput fails on unknown terminals; silence stderr and fall back to no color
+reset=$(tput sgr0 2> /dev/null || echo "")
+blue=$(tput setaf 4 2> /dev/null || echo "")
+red=$(tput setaf 1 2> /dev/null || echo "")
+green=$(tput setaf 2 2> /dev/null || echo "")
 
-vm_index=4
-ha_enabled="TRUE"
+interactive="FALSE"
 ping_site="8.8.8.8"
 ntp_server="pool.ntp.org"
 net_isolation_enabled="TRUE"
+post_config="TRUE"
+debug="FALSE"
 
 declare -i CNT
 declare UNDERCLOUD
 declare -A deploy_options_array
+declare -a performance_options
+declare -A NET_MAP
 
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 DEPLOY_OPTIONS=""
-RESOURCES=/var/opt/opnfv/stack
-CONFIG=/var/opt/opnfv
-INSTACKENV=$CONFIG/instackenv.json
-NETENV=$CONFIG/network-environment.yaml
+CONFIG=${CONFIG:-'/var/opt/opnfv'}
+RESOURCES=${RESOURCES:-"$CONFIG/images"}
+LIB=${LIB:-"$CONFIG/lib"}
+OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
+
+VM_CPUS=4
+VM_RAM=8
+VM_COMPUTES=2
+
+# Netmap used to map networks to OVS bridge names
+NET_MAP['admin_network']="br-admin"
+NET_MAP['private_network']="br-private"
+NET_MAP['public_network']="br-public"
+NET_MAP['storage_network']="br-storage"
+ext_net_type="interface"
+ip_address_family=4
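+
+# Illustration (comment only): with the defaults above,
+# ${NET_MAP['admin_network']} resolves to br-admin, so the ovs-vsctl and
+# virsh calls further down operate on a bridge named br-admin for the
+# admin network, br-public for the public network, and so on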
+
+# Libraries
+lib_files=(
+$LIB/common-functions.sh
+$LIB/utility-functions.sh
+$LIB/installer/onos/onos_gw_mac_update.sh
+)
+for lib_file in ${lib_files[@]}; do
+  if ! source $lib_file; then
+    echo -e "${red}ERROR: Failed to source $lib_file${reset}"
+    exit 1
+  fi
+done
 
 ##FUNCTIONS
 ##translates yaml into variables
@@ -90,40 +119,31 @@ parse_setting_value() {
   local mystr=$1
   echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
 }
-##parses deploy settings yaml into globals and options array
-##params: none
-##usage: parse_deploy_settings
+
+##parses network settings yaml into globals
+parse_network_settings() {
+  local output
+  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-net-settings -s $NETSETS -i $net_isolation_enabled -e $CONFIG/network-environment.yaml); then
+      echo -e "${blue}${output}${reset}"
+      eval "$output"
+  else
+      echo -e "${red}ERROR: Failed to parse network settings file $NETSETS ${reset}"
+      exit 1
+  fi
+}
+
+##parses deploy settings yaml into globals
 parse_deploy_settings() {
-  local global_prefix="deploy_global_params_"
-  local options_prefix="deploy_deploy_options_"
-  local myvar myvalue
-  local settings=$(parse_yaml $DEPLOY_SETTINGS_FILE "deploy_")
-
-  for this_setting in $settings; do
-    if contains_prefix $this_setting $global_prefix; then
-      myvar=$(parse_setting_var $this_setting $global_prefix)
-      if [ -z "$myvar" ]; then
-        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
-      fi
-      myvalue=$(parse_setting_value $this_setting)
-      # Do not override variables set by cmdline
-      if [ -z "$(eval echo \$$myvar)" ]; then
-        eval "$myvar=\$myvalue"
-        echo -e "${blue}Global parameter set: ${myvar}:${myvalue}${reset}"
-      else
-        echo -e "${blue}Global parameter already set: ${myvar}${reset}"
-      fi
-    elif contains_prefix $this_setting $options_prefix; then
-      myvar=$(parse_setting_var $this_setting $options_prefix)
-      if [ -z "$myvar" ]; then
-        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
-      fi
-      myvalue=$(parse_setting_value $this_setting)
-      deploy_options_array[$myvar]=$myvalue
-      echo -e "${blue}Deploy option set: ${myvar}:${myvalue}${reset}"
-    fi
-  done
+  local output
+  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
+      echo -e "${blue}${output}${reset}"
+      eval "$output"
+  else
+      echo -e "${red}ERROR: Failed to parse deploy settings file $DEPLOY_SETTINGS_FILE ${reset}"
+      exit 1
+  fi
 }
+
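+# Note: both parsers print plain shell assignments on stdout (for example
+# "admin_network_cidr=192.0.2.0/24"; value invented for illustration), which
+# the eval calls above promote into globals used throughout this script
+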
Aborting.${reset}" + exit 1 + } - instack_env_output=" + instackenv_output=" { \"nodes\" : [ @@ -175,7 +198,7 @@ parse_inventory_file() { node_output=" { \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\", - \"pm_type\": \"pxe_ipmitool\", + \"pm_type\": \"$(eval echo \${${node}pm_type})\", \"mac\": [ \"$(eval echo \${${node}mac_address})\" ], @@ -187,23 +210,23 @@ parse_inventory_file() { \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\", \"capabilities\": \"$(eval echo \${${node}capabilities})\" " - instack_env_output+=${node_output} + instackenv_output+=${node_output} if [ $node_count -lt $node_total ]; then - instack_env_output+=" }," + instackenv_output+=" }," else - instack_env_output+=" }" + instackenv_output+=" }" fi done - instack_env_output+=' + instackenv_output+=' ] } ' #Copy instackenv.json to undercloud for baremetal - echo -e "{blue}Parsed instackenv JSON:\n${instack_env_output}${reset}" + echo -e "{blue}Parsed instackenv JSON:\n${instackenv_output}${reset}" ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" < instackenv.json << EOF -$instack_env_output +$instackenv_output EOF EOI @@ -239,32 +262,106 @@ function configure_deps { sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf" fi - # ensure brbm networks are configured - systemctl start openvswitch - ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm - virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml - virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm - ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1 - virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml - virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1 + # ensure no dhcp server is running on jumphost + if ! 
   #Copy instackenv.json to undercloud for baremetal
-  echo -e "{blue}Parsed instackenv JSON:\n${instack_env_output}${reset}"
+  echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
 cat > instackenv.json << EOF
-$instack_env_output
+$instackenv_output
 EOF
 EOI
 
@@ -239,32 +262,106 @@ function configure_deps {
       sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
   fi
 
-  # ensure brbm networks are configured
-  systemctl start openvswitch
-  ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
-  virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
-  virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm
-  ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1
-  virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml
-  virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1
+  # ensure no dhcp server is running on jumphost
+  if ! sudo systemctl status dhcpd | grep dead; then
+    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
+    sudo systemctl stop dhcpd
+    sudo systemctl disable dhcpd
+  fi
 
-  # ensure storage pool exists and is started
-  virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
-  virsh pool-list | grep -Eo "default\s+active" > /dev/null || virsh pool-start default
-
-  if virsh net-list | grep default > /dev/null; then
-    num_ints_same_subnet=$(ip addr show | grep "inet 192.168.122" | wc -l)
-    if [ "$num_ints_same_subnet" -gt 1 ]; then
-      virsh net-destroy default
-      ##go edit /etc/libvirt/qemu/networks/default.xml
-      sed -i 's/192.168.122/192.168.123/g' /etc/libvirt/qemu/networks/default.xml
-      sed -i 's/192.168.122/192.168.123/g' instackenv-virt.json
-      sleep 5
-      virsh net-start default
-      virsh net-autostart default
-    fi
+  # ensure networks are configured
+  systemctl status libvirtd || systemctl start libvirtd
+  systemctl status openvswitch || systemctl start openvswitch
+
+  # If flat we only use admin network
+  if [[ "$net_isolation_enabled" == "FALSE" ]]; then
+    virsh_enabled_networks="admin_network"
+    enabled_network_list="admin_network"
+  # For baremetal we only need to create/attach Undercloud to admin and public
+  elif [ "$virtual" == "FALSE" ]; then
+    virsh_enabled_networks="admin_network public_network"
+  else
+    virsh_enabled_networks=$enabled_network_list
   fi
 
+  # ensure default network is configured correctly
+  libvirt_dir="/usr/share/libvirt/networks"
+  virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
+  virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
+  virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
+
+  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
+    for network in ${OPNFV_NETWORK_TYPES}; do
+      echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
+      ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
+      virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+<network>
+  <name>$network</name>
+  <forward mode='bridge'/>
+  <bridge name='${NET_MAP[$network]}'/>
+  <virtualport type='openvswitch'/>
+</network>
+EOF
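+      # forward mode='bridge' plus virtualport type='openvswitch' makes
+      # libvirt plug guest vNICs directly into the existing OVS bridge
+      # instead of creating a Linux bridge of its own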
+      if ! (virsh net-list --all | grep $network > /dev/null); then
+          echo "${red}ERROR: unable to create network: ${network}${reset}"
+          exit 1;
+      fi
+      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
+      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
+      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
+    done
+
+    echo -e "${blue}INFO: Bridges set: ${reset}"
+    ovs-vsctl list-br
+
+    # bridge interfaces to correct OVS instances for baremetal deployment
+    for network in ${enabled_network_list}; do
+      if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
+        continue
+      fi
+      this_interface=$(eval echo \${${network}_bridged_interface})
+      # check if this is a bridged interface for this network
+      if [[ -n "$this_interface" && "$this_interface" != "none" ]]; then
+        if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
+          echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
+          exit 1
+        else
+          echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
+        fi
+      else
+        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
+        exit 1
+      fi
+    done
+  else
+    for network in ${OPNFV_NETWORK_TYPES}; do
+      echo "${blue}INFO: Creating Virsh Network: $network${reset}"
+      virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+<network ipv6='yes'>
+<name>$network</name>
+<bridge name='${NET_MAP[$network]}'/>
+</network>
+EOF
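+      # Unlike the baremetal branch above, these networks are plain libvirt
+      # bridges with no OVS virtualport; libvirt manages the bridge itself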
+      if ! (virsh net-list --all | grep $network > /dev/null); then
+          echo "${red}ERROR: unable to create network: ${network}${reset}"
+          exit 1;
+      fi
+      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
+      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
+      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
+    done
+
+    echo -e "${blue}INFO: Bridges set: ${reset}"
+    brctl show
+  fi
+
+  echo -e "${blue}INFO: virsh networks set: ${reset}"
+  virsh net-list
+
+  # ensure storage pool exists and is started
+  virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
+  virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
+
   if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
     echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n  \
Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
@@ -288,158 +385,288 @@ Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
 
 ##verify vm exists, and has a dhcp lease assigned to it
 ##params: none
-function setup_instack_vm {
-  if ! virsh list --all | grep instack > /dev/null; then
-      #virsh vol-create default instack.qcow2.xml
-      virsh define $CONFIG/instack.xml
-
-      #Upload instack image
-      #virsh vol-create default --file instack.qcow2.xml
-      virsh vol-create-as default instack.qcow2 30G --format qcow2
+function setup_undercloud_vm {
+  # defined up front because the ssh key injection below also needs it when
+  # the VM already exists
+  local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
+  if ! virsh list --all | grep undercloud > /dev/null; then
+      undercloud_nets="default admin_network"
+      if [[ $enabled_network_list =~ "public_network" ]]; then
+        undercloud_nets+=" public_network"
+      fi
+      define_vm undercloud hd 30 "$undercloud_nets" 4 12288
 
 ### this doesn't work for some reason I was getting hangup events so using cp instead
-      #virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2
+      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
       #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
       #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
       #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
-      #error: cannot close volume instack.qcow2
+      #error: cannot close volume undercloud.qcow2
       #error: internal error: received hangup / error event on socket
       #error: Reconnected to the hypervisor
-      cp -f $RESOURCES/instack.qcow2 /var/lib/libvirt/images/instack.qcow2
+      cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+
+      # resize Undercloud machine
+      echo "Checking if Undercloud needs to be resized..."
+      undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst | grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+      if [ "$undercloud_size" -lt 30 ]; then
+        qemu-img resize $undercloud_dst +25G
+        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+        LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
+        new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst | grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+        if [ "$new_size" -lt 30 ]; then
+          echo "Error resizing Undercloud machine, disk size is ${new_size}"
+          exit 1
+        else
+          echo "Undercloud successfully resized"
+        fi
+      else
+        echo "Skipped Undercloud resize, upstream is large enough"
+      fi
+
   else
-      echo "Found Instack VM, using existing VM"
+      echo "Found Undercloud VM, using existing VM"
   fi
 
   # if the VM is not running update the authkeys and start it
-  if ! virsh list | grep instack > /dev/null; then
-    echo "Injecting ssh key to instack VM"
-    virt-customize -c qemu:///system -d instack --run-command "mkdir /root/.ssh/" \
+  if ! virsh list | grep undercloud > /dev/null; then
+    echo "Injecting ssh key to Undercloud VM"
+    LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
         --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
         --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
         --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
-    virsh start instack
+    virsh start undercloud
   fi
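+
+  # Note: virt-customize edits the disk image offline, so the key injection
+  # above must happen while the domain is shut off, before virsh start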
 
-  sleep 3 # let DHCP happen
+  sleep 10 # let the undercloud start up
 
+  # get the undercloud VM IP
   CNT=10
-  echo -n "${blue}Waiting for instack's dhcp address${reset}"
-  while ! grep instack /var/lib/libvirt/dnsmasq/default.leases > /dev/null && [ $CNT -gt 0 ]; do
+  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
+  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
+  while ! arp -e | grep ${undercloud_mac} > /dev/null && [ $CNT -gt 0 ]; do
       echo -n "."
-      sleep 3
-      CNT=CNT-1
+      sleep 10
+      CNT=$((CNT-1))
   done
+  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
 
-  # get the instack VM IP
-  UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
   if [ -z "$UNDERCLOUD" ]; then
-    #if not found then dnsmasq may be using leasefile-ro
-    instack_mac=$(virsh domiflist instack | grep default | \
-                  grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
-    UNDERCLOUD=$(arp -e | grep ${instack_mac} | awk {'print $1'})
-
-    if [ -z "$UNDERCLOUD" ]; then
-      echo "\n\nNever got IP for Instack. Can Not Continue."
-      exit 1
-    fi
+    echo -e "\n\nCan't get IP for Undercloud. Can Not Continue."
+    exit 1
   else
-     echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
+     echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
   fi
 
   CNT=10
-  echo -en "${blue}\rValidating instack VM connectivity${reset}"
+  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
   while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
       echo -n "."
       sleep 3
-      CNT=$CNT-1
+      CNT=$((CNT-1))
   done
   if [ "$CNT" -eq 0 ]; then
-      echo "Failed to contact Instack. Can Not Continue"
+      echo "Failed to contact Undercloud. Can Not Continue"
      exit 1
   fi
   CNT=10
   while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
       echo -n "."
       sleep 3
-      CNT=$CNT-1
+      CNT=$((CNT-1))
   done
   if [ "$CNT" -eq 0 ]; then
-      echo "Failed to connect to Instack. Can Not Continue"
+      echo "Failed to connect to Undercloud. Can Not Continue"
       exit 1
   fi
 
   # extra space to overwrite the previous connectivity output
   echo -e "${blue}\r                                                                 ${reset}"
-
-  #add the instack brbm1 interface
-  virsh attach-interface --domain instack --type network --source brbm1 --model rtl8139 --config --live
   sleep 1
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep 192.168.37.1 > /dev/null; then ip a a 192.168.37.1/24 dev eth2; ip link set up dev eth2; fi"
+  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"
 
   # ssh key fix for stack user
   ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
 }
 
 ##Create virtual nodes in virsh
-##params: none
+##params: vcpus, ramsize
 function setup_virtual_baremetal {
+  local vcpus ramsize
+  if [ -z "$1" ]; then
+    vcpus=4
+    ramsize=8192
+  elif [ -z "$2" ]; then
+    vcpus=$1
+    ramsize=8192
+  else
+    vcpus=$1
+    ramsize=$(($2*1024))
+  fi
+  #start by generating the opening json for instackenv.json
+  cat > $CONFIG/instackenv-virt.json << EOF
+{
+  "nodes": [
+EOF
+
+  # next create the virtual machines and add their definitions to the file
+  if [ "$ha_enabled" == "False" ]; then
+      # 1 controller + computes
+      # zero based so just pass compute count
+      vm_index=$VM_COMPUTES
+  else
+      # 3 controllers + computes
+      # zero based so add 2 to compute count
+      vm_index=$((2+$VM_COMPUTES))
+  fi
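+  # e.g. with the default VM_COMPUTES=2: HA gives vm_index=4, so seq 0..4
+  # below creates 5 VMs (3 control + 2 compute); non-HA gives vm_index=2,
+  # i.e. 3 VMs (1 control + 2 compute)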
 
   for i in $(seq 0 $vm_index); do
-    if ! virsh list --all | grep baremetalbrbm_brbm1_${i} > /dev/null; then
-      if [ ! -e $CONFIG/baremetalbrbm_brbm1_${i}.xml ]; then
-        define_virtual_node baremetalbrbm_brbm1_${i}
-      fi
-      virsh define $CONFIG/baremetalbrbm_brbm1_${i}.xml
+    if ! virsh list --all | grep baremetal${i} > /dev/null; then
+      define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
+      for n in private_network public_network storage_network; do
+        if [[ $enabled_network_list =~ $n ]]; then
+          echo -n "$n "
+          virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config
+        fi
+      done
     else
       echo "Found Baremetal ${i} VM, using existing VM"
     fi
-    virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
+    #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
+    mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
+
+    cat >> $CONFIG/instackenv-virt.json << EOF
+    {
+      "pm_addr": "192.168.122.1",
+      "pm_user": "root",
+      "pm_password": "INSERT_STACK_USER_PRIV_KEY",
+      "pm_type": "pxe_ssh",
+      "mac": [
+        "$mac"
+      ],
+      "cpu": "$vcpus",
+      "memory": "$ramsize",
+      "disk": "41",
+      "arch": "x86_64"
+    },
+EOF
+  done
+
+  #remove the last line (its closing brace has a trailing comma); the brace
+  #is re-added without the comma below
+  tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
+
+  #finally reclose the bracket and close the instackenv.json file
+  cat >> $CONFIG/instackenv-virt.json << EOF
+    }
+  ],
+  "arch": "x86_64",
+  "host-ip": "192.168.122.1",
+  "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
+  "seed-ip": "",
+  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
+  "ssh-user": "root"
+}
+EOF
+}
+
+##Create virtual nodes in virsh
+##params: name - String: libvirt name for VM
+##        bootdev - String: boot device for the VM
+##        disksize - Number: size of the disk in GB
+##        ovs_bridges - List: list of ovs bridges
+##        vcpus - Number of VCPUs to use (defaults to 4)
+##        ramsize - Size of RAM for VM in MB (defaults to 8192)
+function define_vm () {
+  local vcpus ramsize
+
+  if [ -z "$5" ]; then
+    vcpus=4
+    ramsize=8388608
+  elif [ -z "$6" ]; then
+    vcpus=$5
+    ramsize=8388608
+  else
+    vcpus=$5
+    ramsize=$(($6*1024))
+  fi
+
+  # Create the libvirt storage volume
+  if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
+    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
+    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
+    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
+    touch $volume_path
+    virsh vol-delete ${1}.qcow2 --pool default
+  fi
+  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
+  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
+  if [ ! -f $volume_path ]; then
+     echo "$volume_path Not created successfully... Aborting"
+     exit 1
+  fi
+
+  # create the VM
+  /usr/libexec/openstack-tripleo/configure-vm --name $1 \
+                                              --bootdev $2 \
+                                              --image "$volume_path" \
+                                              --diskbus sata \
+                                              --arch x86_64 \
+                                              --cpus $vcpus \
+                                              --memory $ramsize \
+                                              --libvirt-nic-driver virtio \
+                                              --baremetal-interface $4
+}
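+
+# For reference, setup_undercloud_vm above calls e.g.
+#   define_vm undercloud hd 30 "default admin_network" 4 12288
+# i.e. a 30GB disk booting from disk, with 4 vCPUs and 12GB of RAM, while the
+# virtual baremetal nodes PXE-boot ("network") off the admin network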
Aborting" + exit 1 + fi + + # create the VM + /usr/libexec/openstack-tripleo/configure-vm --name $1 \ + --bootdev $2 \ + --image "$volume_path" \ + --diskbus sata \ + --arch x86_64 \ + --cpus $vcpus \ + --memory $ramsize \ + --libvirt-nic-driver virtio \ + --baremetal-interface $4 +} + +##Copy over the glance images and instackenv json file ##params: none function configure_undercloud { - + local controller_nic_template compute_nic_template echo - echo "Copying configuration file and disk images to instack" - scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD": - scp ${SSH_OPTIONS[@]} $NETENV "stack@$UNDERCLOUD": - scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD": + echo "Copying configuration files to Undercloud" + if [[ "$net_isolation_enabled" == "TRUE" ]]; then + echo -e "${blue}Network Environment set for Deployment: ${reset}" + cat /tmp/network-environment.yaml + scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD": - # ensure stack user on instack machine has an ssh key + # check for ODL L3/ONOS + if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then + ext_net_type=br-ex + fi + + if ! controller_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then + echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}" + exit 1 + fi + + if ! compute_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then + echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}" + exit 1 + fi + ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI +mkdir nics/ +cat > nics/controller.yaml << EOF +$controller_nic_template +EOF +cat > nics/compute.yaml << EOF +$compute_nic_template +EOF +EOI + fi + + # ensure stack user on Undercloud machine has an ssh key ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi" if [ "$virtual" == "TRUE" ]; then - # copy the instack vm's stack user's pub key to - # root's auth keys so that instack can control + # copy the Undercloud VM's stack user's pub key to + # root's auth keys so that Undercloud can control # vm power on the hypervisor ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys - # fix MACs to match new setup - for i in $(seq 0 $vm_index); do - pyscript="import json -data = json.load(open('$CONFIG/instackenv-virt.json')) -print data['nodes'][$i]['mac'][0]" - - old_mac=$(python -c "$pyscript") - new_mac=$(virsh dumpxml baremetalbrbm_brbm1_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+") - # this doesn't work with multiple vnics on the vms - #if [ "$old_mac" != "$new_mac" ]; then - # echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}" - # sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json - #fi - done - DEPLOY_OPTIONS+=" --libvirt-type qemu" INSTACKENV=$CONFIG/instackenv-virt.json - NETENV=$CONFIG/network-environment.yaml - # upload instackenv file to Instack for virtual deployment + # upload instackenv file to Undercloud for virtual deployment scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json fi @@ -463,9 +690,9 @@ EOI # configure undercloud on Undercloud VM echo "Running undercloud configuration." 
- echo "Logging undercloud configuration to instack:/home/stack/apex-undercloud-install.log" + echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log" ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI -if [ -n "$DEPLOY_SETTINGS_FILE" ]; then +if [[ "$net_isolation_enabled" == "TRUE" ]]; then sed -i 's/#local_ip/local_ip/' undercloud.conf sed -i 's/#network_gateway/network_gateway/' undercloud.conf sed -i 's/#network_cidr/network_cidr/' undercloud.conf @@ -474,71 +701,177 @@ if [ -n "$DEPLOY_SETTINGS_FILE" ]; then sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf - openstack-config --set undercloud.conf DEFAULT local_ip ${deploy_options_array['instack_ip']}/${deploy_options_array['provisioning_cidr']##*/} - openstack-config --set undercloud.conf DEFAULT network_gateway ${deploy_options_array['provisioning_gateway']} - openstack-config --set undercloud.conf DEFAULT network_cidr ${deploy_options_array['provisioning_cidr']} - openstack-config --set undercloud.conf DEFAULT dhcp_start ${deploy_options_array['provisioning_dhcp_start']} - openstack-config --set undercloud.conf DEFAULT dhcp_end ${deploy_options_array['provisioning_dhcp_end']} - openstack-config --set undercloud.conf DEFAULT inspection_iprange ${deploy_options_array['provisioning_inspection_iprange']} + openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/} + openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip} + openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr} + openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*} + openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,} + openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range} openstack-config --set undercloud.conf DEFAULT undercloud_debug false - if [ -n "$net_isolation_enabled" ]; then - sed -i '/ControlPlaneSubnetCidr/c\\ ControlPlaneSubnetCidr: "${deploy_options_array['provisioning_cidr']##*/}"' network-environment.yaml - sed -i '/ControlPlaneDefaultRoute/c\\ ControlPlaneDefaultRoute: ${deploy_options_array['provisioning_gateway']}' network-environment.yaml - sed -i '/ExternalNetCidr/c\\ ExternalNetCidr: ${deploy_options_array['ext_net_cidr']}' network-environment.yaml - sed -i '/ExternalAllocationPools/c\\ ExternalAllocationPools: [{'start': '${deploy_options_array['ext_allocation_pool_start']}', 'end': '${deploy_options_array['ext_allocation_pool_end']}'}]' network-environment.yaml - sed -i '/ExternalInterfaceDefaultRoute/c\\ ExternalInterfaceDefaultRoute: ${deploy_options_array['ext_gateway']}' network-environment.yaml - fi fi -openstack undercloud install &> apex-undercloud-install.log +sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml +sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml +sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml + +# we assume that packages will not need to be updated with undercloud install +# and that it will 
 
-openstack undercloud install &> apex-undercloud-install.log
+sudo sed -i '/CephClusterFSID:/c\\  CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephMonKey:/c\\  CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephAdminKey:/c\\  CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+
+# we assume that packages will not need to be updated with undercloud install
+# and that it will be used only to configure the undercloud
+# package updates would need to be handled manually with yum update
+sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
+cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
+#!/bin/sh
+exit 0
+EOF
+
+openstack undercloud install &> apex-undercloud-install.log || {
+    # cat the undercloud install log in case it fails
+    echo "ERROR: openstack undercloud install has failed. Dumping Log:"
+    cat apex-undercloud-install.log
+    exit 1
+}
+
+sleep 30
+sudo systemctl restart openstack-glance-api
+sudo systemctl restart openstack-nova-conductor
+sudo systemctl restart openstack-nova-compute
+
+sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
+sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
+sudo systemctl restart openstack-heat-engine
+sudo systemctl restart openstack-heat-api
 EOI
 
+# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
+# TODO: revisit and file a bug if necessary. This should eventually be removed
+# as well as glance api problem
+echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
+sleep 15
 }
+ echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}" + exit 1 + fi + fi + elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml" - elif [ ${deploy_options_array['sdn_controller']} == 'onos' ]; then - echo -e "${red}ERROR: ONOS is currently unsupported...exiting${reset}" - exit 1 - elif [ ${deploy_options_array['sdn_controller']} == 'opencontrail' ]; then + SDN_IMAGE=opendaylight + elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml" + SDN_IMAGE=onos + elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}" exit 1 + elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then + echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}" + SDN_IMAGE=opendaylight + else + echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}" + echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}" + exit 1 + fi + + # Make sure the correct overcloud image is available + if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then + echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment." + echo "Both ONOS and OpenDaylight are currently deployed from this image." + echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}" + exit 1 + fi + + echo "Copying overcloud image to Undercloud" + ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2" + scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2 + + # Push performance options to subscript to modify per-role images as needed + for option in "${performance_options[@]}" ; do + echo -e "${blue}Setting performance option $option${reset}" + ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option" + done + + # Add performance deploy options if they have been set + if [ ! -z "${deploy_options_array['performance']}" ]; then + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml" fi + # make sure ceph is installed + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" + + # scale compute nodes according to inventory + total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory") + # check if HA is enabled - if [[ "$ha_enabled" == "TRUE" ]]; then - DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2" + if [[ "$ha_enabled" == "True" ]]; then + DEPLOY_OPTIONS+=" --control-scale 3" + compute_nodes=$((total_nodes - 3)) DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml" + else + compute_nodes=$((total_nodes - 1)) + fi + + if [ "$compute_nodes" -le 0 ]; then + echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. 
 
   if [[ "$net_isolation_enabled" == "TRUE" ]]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
+    #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
     DEPLOY_OPTIONS+=" -e network-environment.yaml"
   fi
 
-  if [[ "$ha_enabled" == "TRUE" ]] || [[ $net_isolation_enabled == "TRUE" ]]; then
+  if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
     DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
   fi
 
   if [[ ! "$virtual" == "TRUE" ]]; then
     DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
+  else
+    DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
   fi
 
+  DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
+
+  echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
+
+  # save the deploy command so it can be reviewed and used for debugging
+  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+cat > deploy_command << EOF
+openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
+EOF
+EOI
+
+  if [ "$interactive" == "TRUE" ]; then
+    if ! prompt_user "Overcloud Deployment"; then
+      echo -e "${blue}INFO: User requests exit${reset}"
+      exit 0
+    fi
+  fi
+
+  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source stackrc
+openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
+if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
+  $(typeset -f debug_stack)
+  debug_stack
+  exit 1
+fi
+EOI
+
+  if [ "$debug" == 'TRUE' ]; then
+      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source stackrc
+echo "Keystone Endpoint List:"
+keystone endpoint-list
+echo "Keystone Service List"
+keystone service-list
+EOI
+  fi
+}
 
 main() {
   parse_cmdline "$@"
+  echo -e "${blue}INFO: Parsing network settings file...${reset}"
+  parse_network_settings
   if ! configure_deps; then
-    echo "Dependency Validation Failed, Exiting."
+    echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
+    exit 1
   fi
 
   if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
+    echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
     parse_deploy_settings
   fi
 
-  setup_instack_vm
+  setup_undercloud_vm
   if [ "$virtual" == "TRUE" ]; then
-    setup_virtual_baremetal
+    setup_virtual_baremetal $VM_CPUS $VM_RAM
   elif [ -n "$INVENTORY_FILE" ]; then
     parse_inventory_file
   fi
   configure_undercloud
   undercloud_prep_overcloud_deploy
+  if [ "$post_config" == "TRUE" ]; then
+    if ! configure_post_install; then
+      echo -e "${red}ERROR: Post Install Configuration Failed, Exiting.${reset}"
+      exit 1
+    else
+      echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
+    fi
+  fi
+  if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
+    if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
+      echo -e "${red}ERROR: ONOS Post Install Configuration Failed, Exiting.${reset}"
+      exit 1
+    else
+      echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"
+    fi
+  fi
 }
 
 main "$@"