+ ##clone genesis and move into node folder
+ clone_bgs $vm_dir/$node
+
+ cd $vm_dir/$node
+
+ if [ -n "$base_config" ]; then
+ if ! cp -f "$base_config" opnfv_ksgen_settings.yml; then
+ echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ exit 1
+ fi
+ fi
+
+ ##parse yaml into variables
+ eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+ ##find node type
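+ ##(indirect lookup of $config_nodes_<node>_type exported by parse_yaml above)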
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+
+ ##modify memory and cpu
+ node_memory=$(eval echo \${config_nodes_${node}_memory})
+ node_vcpus=$(eval echo \${config_nodes_${node}_cpus})
+ node_storage=$(eval echo \${config_nodes_${node}_disk})
+
+ sed -i 's/^.*vb.memory =.*$/ vb.memory = '"$node_memory"'/' Vagrantfile
+ sed -i 's/^.*vb.cpus =.*$/ vb.cpus = '"$node_vcpus"'/' Vagrantfile
+
+ if ! resize_vagrant_disk $node_storage; then
+ echo "${red}Error while resizing vagrant box to size $node_storage for $node! ${reset}"
+ exit 1
+ fi
+
+ ##trozet test make compute nodes wait 20 minutes
+ if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+ echo "${blue}Waiting 20 minutes for Control nodes to install before continuing with Compute nodes..."
+ compute_wait_completed=true
+ sleep 1400
+ fi
+
+ ## Add Admin interface
+ mac_string=config_nodes_${node}_mac_address
+ mac_addr=$(eval echo \$$mac_string)
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ -z "$mac_addr" ]; then
+ echo "${red} Unable to find mac_address for $node! ${reset}"
+ exit 1
+ fi
+ this_admin_ip=${admin_ip_arr[$node]}
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+
+ ## Add private interface
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+ if [ -z "$mac_addr" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac: 00-60-2F prefix plus three random bytes
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/urandom 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ "$node_type" == "controller" ]; then
+ new_node_ip=${controllers_ip_arr[$controller_count]}
+ if [ ! "$new_node_ip" ]; then
+ echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}"
+ exit 1
+ fi
+ ((controller_count++))
+ else
+ next_private_ip=$(next_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "{red}ERROR: Could not find private ip for $node ${reset}"
+ exit 1
+ fi
+ new_node_ip=$next_private_ip
+ fi
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ ##replace host_ip in vm_nodes_provision with private ip
+ sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+ ##replace ping site
+ if [ ! -z "$ping_site" ]; then
+ sed -i 's/www\.google\.com/'"$ping_site"'/' vm_nodes_provision.sh
+ fi
+
+ ##find public ip info and add public interface
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/urandom 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ this_public_ip=${public_ip_arr[$node]}
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+ else
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ fi
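+ ##remove the unused eth_replace3 placeholder interface from the Vagrantfile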
+ remove_vagrant_network eth_replace3
+
+ ##modify provisioning to do puppet install, config, and foreman check-in
+ ##substitute host_name and dns_server in the provisioning script
+ host_string=config_nodes_${node}_short_name
+ short_host_name=$(eval echo \$$host_string)
+ ##substitute domain_name
+ domain_name=$config_domain_name
+ sed -i 's/^domain_name=REPLACE/domain_name='$domain_name'/' vm_nodes_provision.sh
+ host_name=${short_host_name}.${domain_name}
+ sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+ ##dns server should be the foreman server
+ sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ ## remove bootstrap and NAT provisioning
+ sed -i '/nat_setup.sh/d' Vagrantfile
+ sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ ## modify default_gw to be node_default_gw
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ echo "${blue}Starting Vagrant Node $node! ${reset}"
+ ##stand up vagrant
+ if ! vagrant up; then
+ echo "${red} Unable to start $node ${reset}"
+ exit 1
+ else
+ echo "${blue} $node VM is up! ${reset}"
+ fi
+ done
+ echo "${blue} All VMs are UP! ${reset}"
+ echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+ ##check puppet is complete
+ ##ssh into foreman server, run check to verify puppet is complete
+ pushd $vm_dir/foreman_vm
+ if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+ echo "${red} Failed to validate puppet completion on nodes ${reset}"
+ exit 1
+ else
+ echo "{$blue} Puppet complete on all nodes! ${reset}"
+ fi
+ popd
+ ##add routes back to nodes
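+ ##replace the Vagrant NAT default gateway (10.0.2.2) with the public gateway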
+ for node in ${nodes}; do
+ pushd $vm_dir/$node
+ if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+ echo "${blue} Adding public route back to $node! ${reset}"
+ vagrant ssh -c "route add default gw $this_default_gw"
+ vagrant ssh -c "route delete default gw 10.0.2.2"
+ fi
+ popd
+ done
+ if [ ! -z "$horizon_public_vip" ]; then
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+ else
+ ##Find public IP of controller
+ for node in ${nodes}; do
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+ if [ "$node_type" == "controller" ]; then
+ pushd $vm_dir/$node
+ horizon_ip=`vagrant ssh -c "ifconfig enp0s10" | grep -Eo "inet [0-9\.]+" | awk '{print $2}'`
+ popd
+ break
+ fi
+ done
+ if [ -z "$horizon_ip" ]; then
+ echo "${red}Warn: Unable to determine horizon IP, please login to your controller node to find it${reset}"
+ fi
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_ip} ${reset}"
+ fi
+ fi
+}
+
+##check to make sure nodes are powered off
+##this function does nothing if virtual
+##params: none
+##usage: check_baremetal_nodes()
+check_baremetal_nodes() {
+ if [ -n "$virtual" ]; then
+ echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}"
+ else
+ echo "${blue}Checking Baremetal nodes power state... ${reset}"
+ if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ echo "${blue}Installing ipmitool...${reset}"
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Failed to install ipmitool!${reset}"
+ exit 1
+ fi
+ fi
+
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
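+ ###node_counter now holds the node count; the bmc arrays are indexed 0 through max_nodes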
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \
+ | grep "System Power" | cut -d ':' -f2 | tr -d [:blank:]`
+ if [ "$ipmi_output" == "on" ]; then
+ echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}"
+ echo "${red}Please run clean.sh before running deploy! ${reset}"
+ exit 1
+ elif [ "$ipmi_output" == "off" ]; then
+ echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}"
+ else
+ echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}"
+ fi
+ done
+ else
+ echo "${red}base_config was not provided for a baremetal install! Exiting${reset}"
+ exit 1
+ fi
+ fi
+}
+
+ ##resizes vagrant disk (cannot shrink; smaller requests trigger a fresh box re-add)
+##params: size in GB
+##usage: resize_vagrant_disk 100
+resize_vagrant_disk() {
+ if [[ "$1" < 40 ]]; then
+ echo "${blue}Warn: Requested disk size cannot be less than 40, using 40 as new size${reset}"
+ new_size_gb=40
+ else
+ new_size_gb=$1
+ fi
+
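+ # Re-add the box if it is missing (force-remove first in case a stale or partial box is registered)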
+ if ! vagrant box list | grep opnfv; then
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ fi
+
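+ # Operate inside the directory that holds the box's backing vmdk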
+ pushd $vagrant_box_dir
+
+ # Close medium to make sure we can modify it
+ vboxmanage closemedium disk $vagrant_box_vmdk
+
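+ # showhdinfo reports capacity in MB; convert to GB for comparison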
+ cur_size=$(vboxmanage showhdinfo $vagrant_box_vmdk | grep -i capacity | grep -Eo '[0-9]+')
+ cur_size_gb=$((cur_size / 1024))
+
+ if [ "$cur_size_gb" -eq "$new_size_gb" ]; then
+ echo "${blue}Info: Disk size already ${cur_size_gb} ${reset}"
+ popd
+ return
+ elif [[ "$new_size_gb" < "$cur_size_gb" ]] ; then
+ echo "${blue}Info: Requested disk is less than ${cur_size_gb} ${reset}"
+ echo "${blue}Re-adding vagrant box${reset}"
+ if vagrant box list | grep opnfv; then
+ popd
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ pushd $vagrant_box_dir
+ fi
+ fi
+
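+ # modifyhd expects the new size in MB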
+ new_size=$((new_size_gb * 1024))
+ if ! vboxmanage clonehd $vagrant_box_vmdk tmp-disk.vdi --format vdi; then
+ echo "${red}Error: Unable to clone ${vagrant_box_vmdk}${reset}"
+ popd
+ return 1
+ fi
+
+ if ! vboxmanage modifyhd tmp-disk.vdi --resize $new_size; then
+ echo "${red}Error: Unable modify tmp-disk.vdi to ${new_size}${reset}"
+ popd
+ return 1
+ fi
+
+ if ! vboxmanage clonehd tmp-disk.vdi resized-disk.vmdk --format vmdk; then
+ echo "${red}Error: Unable clone tmp-disk.vdi to vmdk${reset}"
+ popd
+ return 1
+ fi
+
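+ # Swap the resized vmdk in for the original and deregister the temporary disks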
+ vboxmanage closemedium disk tmp-disk.vdi --delete
+ rm -f tmp-disk.vdi $vagrant_box_vmdk
+ cp -f resized-disk.vmdk $vagrant_box_vmdk
+ vboxmanage closemedium disk resized-disk.vmdk --delete
+ popd
+}
+
+##END FUNCTIONS
+
+main() {
+ parse_cmdline "$@"
+ disable_selinux
+ check_baremetal_nodes
+ install_EPEL
+ install_vbox
+ install_ansible
+ install_vagrant
+ clean_tmp
+ verify_vm_dir
+ clone_bgs $vm_dir/foreman_vm
+ configure_network
+ configure_virtual
+ start_foreman
+ start_virtual_nodes
+}