2 ##############################################################################
3 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
11 # Deploy script to install provisioning server for OPNFV Apex
12 # author: Dan Radez (dradez@redhat.com)
13 # author: Tim Rozet (trozet@redhat.com)
15 # Based on RDO Manager http://www.rdoproject.org
# Terminal color codes for status output. Fall back to empty strings when
# tput fails (TERM unset/dumb, non-interactive run); 2>/dev/null keeps
# tput's own error message from polluting stderr in that case.
reset=$(tput sgr0 2>/dev/null || echo "")
blue=$(tput setaf 4 2>/dev/null || echo "")
red=$(tput setaf 1 2>/dev/null || echo "")
green=$(tput setaf 2 2>/dev/null || echo "")
# Default NTP server and network-isolation toggle (may be overridden by CLI)
ntp_server="pool.ntp.org"
net_isolation_enabled="TRUE"
# Deploy options parsed from the deploy-settings YAML
declare -A deploy_options_array
# Non-interactive ssh defaults used for every Undercloud connection
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
RESOURCES=${RESOURCES:-'/var/opt/opnfv/images'}
CONFIG=${CONFIG:-'/var/opt/opnfv'}
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
# Netmap used to map networks to OVS bridge names.
# NET_MAP must be declared associative: without 'declare -A' the string
# subscripts below are evaluated arithmetically (all to index 0), so every
# assignment would clobber NET_MAP[0] and name lookups would break.
declare -A NET_MAP
NET_MAP['admin_network']="br-admin"
NET_MAP['private_network']="br-private"
NET_MAP['public_network']="br-public"
NET_MAP['storage_network']="br-storage"
##translates yaml into variables
##params: filename, prefix (ex. "config_")
##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
##NOTE(review): the function's opening line is not visible in this chunk;
##what follows is its sed|awk pipeline, which flattens "key: value" YAML
##into "<prefix><nested>_<key>=<value>" shell assignment lines.
  # fs = ASCII 0x1c (file separator), an unlikely delimiter used to split
  # the indent/key/value fields between the sed and awk stages
  local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
  # sed: normalize each "key: value" line (quoted or bare value) into
  # indent<FS>key<FS>value records; the awk program consumes them
  sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
      -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
  indent = length($1)/2;
  for (i in vname) {if (i > indent) {delete vname[i]}}
  vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
  printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
##checks if prefix exists in string
##params: string, prefix
##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
##returns success (0) when the string starts with the given prefix
  # NOTE(review): $mystr/$prefix are presumably assigned from $1/$2 on
  # lines not visible in this chunk — confirm against the full file
  if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
##parses variable from a string with '='
##and removes global prefix
##params: string, prefix
##usage: parse_setting_var 'deploy_myvar=2' 'deploy_'
  # only emit a result when the string actually contains a 'name=' part
  if echo $mystr | grep -E "^.+\=" > /dev/null; then
    # keep the text before '=', drop the '=' itself, then strip the prefix
    echo $(echo $mystr | grep -Eo "^.+\=" | tr -d '=' | sed 's/^'"$prefix"'//')
##parses value from a string with '='
##params: string ('name=value')
##usage: parse_setting_value
parse_setting_value() {
  # NOTE(review): $mystr is presumably assigned from $1 on a line not
  # visible in this chunk — confirm before relying on this fragment.
  # emit everything after the '=' (grep keeps '=value', tr removes '=')
  echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
##parses network settings yaml into globals
parse_network_settings() {
  # delegate parsing to the python helper; on success echo its output
  # (expected to be shell-consumable settings / status text)
  if output=$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py parse_net_settings -n $NETSETS -i $net_isolation_enabled); then
    echo -e "${blue}${output}${reset}"
##parses deploy settings yaml into globals and options array
##globals written: one shell global per deploy_global_params_* key,
##                 deploy_options_array entries for deploy_deploy_options_* keys
##usage: parse_deploy_settings
parse_deploy_settings() {
  local global_prefix="deploy_global_params_"
  local options_prefix="deploy_deploy_options_"
  # flatten the YAML into "deploy_..." key=value strings
  local settings=$(parse_yaml $DEPLOY_SETTINGS_FILE "deploy_")

  for this_setting in $settings; do
    # global params become shell globals (unless already set on cmdline)
    if contains_prefix $this_setting $global_prefix; then
      myvar=$(parse_setting_var $this_setting $global_prefix)
      if [ -z "$myvar" ]; then
        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
      myvalue=$(parse_setting_value $this_setting)
      # Do not override variables set by cmdline
      if [ -z "$(eval echo \$$myvar)" ]; then
        # NOTE(review): eval-based indirect assignment — safe only while
        # the settings file is trusted input
        eval "$myvar=\$myvalue"
        echo -e "${blue}Global parameter set: ${myvar}:${myvalue}${reset}"
        echo -e "${blue}Global parameter already set: ${myvar}${reset}"
    # deploy options are collected into deploy_options_array instead
    elif contains_prefix $this_setting $options_prefix; then
      myvar=$(parse_setting_var $this_setting $options_prefix)
      if [ -z "$myvar" ]; then
        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
      myvalue=$(parse_setting_value $this_setting)
      deploy_options_array[$myvar]=$myvalue
      echo -e "${blue}Deploy option set: ${myvar}:${myvalue}${reset}"
##parses baremetal yaml settings into compatible json
##writes the json to $CONFIG/instackenv_tmp.json
##reads: $INVENTORY_FILE, $ha_enabled; writes: instackenv JSON on Undercloud
##usage: parse_inventory_file
parse_inventory_file() {
  # flatten the inventory YAML into nodes_nodeN_* key=value strings
  local inventory=$(parse_yaml $INVENTORY_FILE)
  local node_prefix="node"

  # detect number of nodes
  for entry in $inventory; do
    if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
      this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
      # record each unique nodes_nodeN_ prefix exactly once
      if [[ "$inventory_list" != *"$this_node"* ]]; then
        inventory_list+="$this_node "

  # strip the trailing space left by the += above
  inventory_list=$(echo $inventory_list | sed 's/ $//')

  for node in $inventory_list; do
  node_total=$node_count

  # NOTE(review): 'ha_enabled' below is missing its '$' — it compares the
  # literal string "ha_enabled" to "TRUE" and can never match, so only the
  # lowercase "true" branch works. Should be "$ha_enabled" == "TRUE".
  if [[ "$node_total" -lt 5 && ( ha_enabled == "TRUE" || "$ha_enabled" == "true" ) ]]; then
    echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
  elif [[ "$node_total" -lt 2 ]]; then
    echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"

  # re-evaluate the parsed YAML so every nodes_nodeN_* becomes a variable
  eval $(parse_yaml $INVENTORY_FILE) || {
    echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"

  # build one JSON node entry per inventory node via indirect expansion
  for node in $inventory_list; do
\"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
\"pm_type\": \"$(eval echo \${${node}pm_type})\",
\"$(eval echo \${${node}mac_address})\"
\"cpu\": \"$(eval echo \${${node}cpus})\",
\"memory\": \"$(eval echo \${${node}memory})\",
\"disk\": \"$(eval echo \${${node}disk})\",
\"arch\": \"$(eval echo \${${node}arch})\",
\"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
\"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
\"capabilities\": \"$(eval echo \${${node}capabilities})\"
    instackenv_output+=${node_output}
    # comma-separate node objects; the last one closes without a comma
    if [ $node_count -lt $node_total ]; then
      instackenv_output+=" },"
      instackenv_output+=" }"

  #Copy instackenv.json to undercloud for baremetal
  # NOTE(review): "{blue}" below is missing its '$' — the literal text
  # "{blue}" is printed instead of the color escape
  echo -e "{blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat > instackenv.json << EOF
##verify internet connectivity
##returns success when both a raw ping and a DNS-name ping succeed
function verify_internet {
  # ping $ping_site first (raw IP reachability), then a DNS name, to
  # distinguish "no network" from "network up but DNS broken"
  if ping -c 2 $ping_site > /dev/null; then
    if ping -c 2 www.google.com > /dev/null; then
      echo "${blue}Internet connectivity detected${reset}"
      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
    echo "${red}No internet connectivity detected${reset}"
##download dependencies if missing and configure host
##sets up ip forwarding, disables jumphost dhcpd, creates OVS bridges and
##virsh networks per $OPNFV_NETWORK_TYPES, storage pool, kvm modules, ssh key
function configure_deps {
  if ! verify_internet; then
    echo "${red}Will not download dependencies${reset}"

  # verify ip forwarding
  if sysctl net.ipv4.ip_forward | grep 0; then
    sudo sysctl -w net.ipv4.ip_forward=1
    sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"

  # ensure no dhcp server is running on jumphost
  if ! sudo systemctl status dhcpd | grep dead; then
    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
    sudo systemctl stop dhcpd
    sudo systemctl disable dhcpd

  # ensure networks are configured
  systemctl status libvirtd || systemctl start libvirtd
  systemctl status openvswitch || systemctl start openvswitch

  # If flat we only use admin network
  if [[ "$net_isolation_enabled" == "FALSE" ]]; then
    virsh_enabled_networks="admin_network"
    enabled_network_list="admin_network"
  # For baremetal we only need to create/attach Undercloud to admin and public
  elif [ "$virtual" == "FALSE" ]; then
    virsh_enabled_networks="admin_network public_network"
    virsh_enabled_networks=$enabled_network_list

  # ensure default network is configured correctly
  libvirt_dir="/usr/share/libvirt/networks"
  virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
  virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
  virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default

  # create an OVS bridge plus a matching virsh network for each OPNFV network
  for network in ${OPNFV_NETWORK_TYPES}; do
    echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
    ovs-vsctl list-br | grep ${NET_MAP[$network]} > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
    virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<name>$network</name>
<forward mode='bridge'/>
<bridge name='${NET_MAP[$network]}'/>
<virtualport type='openvswitch'/>
    if ! (virsh net-list --all | grep $network > /dev/null); then
      # definition failed — report rather than continue with a broken network
      echo "${red}ERROR: unable to create network: ${network}${reset}"
    rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
    virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
    virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network

  echo -e "${blue}INFO: Bridges set: ${reset}"
  echo -e "${blue}INFO: virsh networks set: ${reset}"

  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
    # bridge interfaces to correct OVS instances for baremetal deployment
    for network in ${enabled_network_list}; do
      if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
      this_interface=$(eval echo \${${network}_bridged_interface})
      # check if this a bridged interface for this network
      # NOTE(review): '||' below makes the condition always true (every value
      # is either non-empty or != "none"); '&&' was almost certainly intended
      if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
        if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
          echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
          echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"

  # ensure storage pool exists and is started
  virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
  virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)

  # warn when CPU virtualization extensions are absent (kvm may not load)
  if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
    echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
Are you sure you have enabled vmx in your bios or hypervisor?${reset}"

  if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
  if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi

  if ! lsmod | grep kvm > /dev/null; then
    echo "${red}kvm kernel modules not loaded!${reset}"

  # generate an ssh key for the current user if one does not exist yet
  if [ ! -e ~/.ssh/id_rsa.pub ]; then
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa

  echo "${blue}All dependencies installed and running${reset}"
##verify vm exists, an has a dhcp lease assigned to it
##creates/resizes the undercloud VM if missing, starts it, waits for its
##dhcp lease and ssh reachability; sets global $UNDERCLOUD to its IP
function setup_undercloud_vm {
  if ! virsh list --all | grep undercloud > /dev/null; then
    # first boot: attach to default+admin (and public when enabled)
    undercloud_nets="default admin_network"
    if [[ $enabled_network_list =~ "public_network" ]]; then
      undercloud_nets+=" public_network"
    define_vm undercloud hd 30 "$undercloud_nets"

    ### this doesn't work for some reason I was getting hangup events so using cp instead
    #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
    #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
    #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #error: cannot close volume undercloud.qcow2
    #error: internal error: received hangup / error event on socket
    #error: Reconnected to the hypervisor

    local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
    cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst

    # resize Undercloud machine
    echo "Checking if Undercloud needs to be resized..."
    # parse the whole-GB device size out of virt-filesystems output
    undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
    if [ "$undercloud_size" -lt 30 ]; then
      qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
      LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
      LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
      # verify the resize actually took effect
      new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
      if [ "$new_size" -lt 30 ]; then
        echo "Error resizing Undercloud machine, disk size is ${new_size}"
        echo "Undercloud successfully resized"
      echo "Skipped Undercloud resize, upstream is large enough"
    echo "Found Undercloud VM, using existing VM"

  # if the VM is not running update the authkeys and start it
  if ! virsh list | grep undercloud > /dev/null; then
    echo "Injecting ssh key to Undercloud VM"
    LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
        --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
        --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
    virsh start undercloud

  sleep 10 # let undercloud get started up

  # get the undercloud VM IP
  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
  # find the VM's MAC on the default network, then look it up in the ARP table
  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
  while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do

  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})

  if [ -z "$UNDERCLOUD" ]; then
    echo "\n\nCan't get IP for Undercloud. Can Not Continue."
    echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"

  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
  # bounded waits ($CNT): first for ping to answer, then for root ssh to work
  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to contact Undercloud. Can Not Continue"
  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to connect to Undercloud. Can Not Continue"

  # extra space to overwrite the previous connectivity output
  echo -e "${blue}\r ${reset}"
  # add the provisioner public IP to eth2 inside the Undercloud when missing
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"

  # ssh key fix for stack user
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
470 ##Create virtual nodes in virsh
472 function setup_virtual_baremetal {
473 #start by generating the opening json for instackenv.json
474 cat > $CONFIG/instackenv-virt.json << EOF
479 # next create the virtual machines and add their definitions to the file
480 for i in $(seq 0 $vm_index); do
481 if ! virsh list --all | grep baremetal${i} > /dev/null; then
482 define_vm baremetal${i} network 41 'admin_network'
483 for n in private_network public_network storage_network; do
484 if [[ $enabled_network_list =~ $n ]]; then
486 virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config
490 echo "Found Baremetal ${i} VM, using existing VM"
492 #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
493 mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
495 cat >> $CONFIG/instackenv-virt.json << EOF
497 "pm_addr": "192.168.122.1",
499 "pm_password": "INSERT_STACK_USER_PRIV_KEY",
500 "pm_type": "pxe_ssh",
512 #truncate the last line to remove the comma behind the bracket
513 tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
515 #finally reclose the bracket and close the instackenv.json file
516 cat >> $CONFIG/instackenv-virt.json << EOF
520 "host-ip": "192.168.122.1",
521 "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
523 "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
##Create virtual nodes in virsh
##params: name - String: libvirt name for VM
##        bootdev - String: boot device for the VM
##        disksize - Number: size of the disk in GB
##        ovs_bridges: - List: list of ovs bridges
function define_vm () {
  # Create the libvirt storage volume
  if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
    # a stale volume exists — remove it before creating a fresh one
    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
    virsh vol-delete ${1}.qcow2 --pool default

  # create the new volume and verify it landed on disk
  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
  if [ ! -f $volume_path ]; then
    echo "$volume_path Not created successfully... Aborting"

  # hand the actual domain definition off to the tripleo helper script
  /usr/libexec/openstack-tripleo/configure-vm --name $1 \
    --image "$volume_path" \
    --libvirt-nic-driver virtio \
    --baremetal-interface $4
##Set network-environment settings
##params: network-environment file to edit
##rewrites the file in-place from the parsed *_network_* globals
function configure_network_environment {
  # tripleo heat template network fragments live here
  tht_dir=/usr/share/openstack-tripleo-heat-templates/network

  # control-plane / external network values come from the network settings
  sed -i '/ControlPlaneSubnetCidr/c\\ ControlPlaneSubnetCidr: "'${admin_network_cidr##*/}'"' $1
  sed -i '/ControlPlaneDefaultRoute/c\\ ControlPlaneDefaultRoute: '${admin_network_provisioner_ip}'' $1
  sed -i '/ExternalNetCidr/c\\ ExternalNetCidr: '${public_network_cidr}'' $1
  sed -i "/ExternalAllocationPools/c\\ ExternalAllocationPools: [{'start': '${public_network_usable_ip_range%%,*}', 'end': '${public_network_usable_ip_range##*,}'}]" $1
  sed -i '/ExternalInterfaceDefaultRoute/c\\ ExternalInterfaceDefaultRoute: '${public_network_gateway}'' $1
  sed -i '/EC2MetadataIp/c\\ EC2MetadataIp: '${admin_network_provisioner_ip}'' $1

  # check for private network
  if [[ ! -z "$private_network_enabled" && "$private_network_enabled" == "True" ]]; then
    sed -i 's#^.*Network::Tenant.*$#  OS::TripleO::Network::Tenant: '${tht_dir}'/tenant.yaml#' $1
    sed -i 's#^.*Controller::Ports::TenantPort:.*$#  OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
    sed -i 's#^.*Compute::Ports::TenantPort:.*$#  OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
    sed -i "/TenantAllocationPools/c\\ TenantAllocationPools: [{'start': '${private_network_usable_ip_range%%,*}', 'end': '${private_network_usable_ip_range##*,}'}]" $1
    sed -i '/TenantNetCidr/c\\ TenantNetCidr: '${private_network_cidr}'' $1
    # disabled: point the tenant network resources at the noop templates
    sed -i 's#^.*Network::Tenant.*$#  OS::TripleO::Network::Tenant: '${tht_dir}'/noop.yaml#' $1
    sed -i 's#^.*Controller::Ports::TenantPort:.*$#  OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Compute::Ports::TenantPort:.*$#  OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1

  # check for storage network
  if [[ ! -z "$storage_network_enabled" && "$storage_network_enabled" == "True" ]]; then
    sed -i 's#^.*Network::Storage:.*$#  OS::TripleO::Network::Storage: '${tht_dir}'/storage.yaml#' $1
    sed -i 's#^.*Network::Ports::StorageVipPort:.*$#  OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i 's#^.*Controller::Ports::StoragePort:.*$#  OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i 's#^.*Compute::Ports::StoragePort:.*$#  OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i "/StorageAllocationPools/c\\ StorageAllocationPools: [{'start': '${storage_network_usable_ip_range%%,*}', 'end': '${storage_network_usable_ip_range##*,}'}]" $1
    sed -i '/StorageNetCidr/c\\ StorageNetCidr: '${storage_network_cidr}'' $1
    # disabled: point the storage network resources at the noop templates
    sed -i 's#^.*Network::Storage:.*$#  OS::TripleO::Network::Storage: '${tht_dir}'/noop.yaml#' $1
    sed -i 's#^.*Network::Ports::StorageVipPort:.*$#  OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Controller::Ports::StoragePort:.*$#  OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Compute::Ports::StoragePort:.*$#  OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1

  # L3-enabled deployments use the br-ex-without-public-ip nics variant
  if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then
    nics_cfg+=_br-ex_no-public-ip
##Copy over the glance images and instackenv json file
##pushes network/nics config and instackenv.json to the Undercloud VM,
##wires up ssh keys, then runs 'openstack undercloud install' remotely
function configure_undercloud {
  # push network config + nics templates to the Undercloud VM
  echo "Copying configuration files to Undercloud"
  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
    configure_network_environment $CONFIG/network-environment.yaml
    echo -e "${blue}Network Environment set for Deployment: ${reset}"
    cat $CONFIG/network-environment.yaml
    scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
cat > nics/controller.yaml << EOF
$(nics_cfg=$nics_cfg sh $CONFIG/nics-controller.yaml.template)
cat > nics/compute.yaml << EOF
$(nics_cfg=$nics_cfg sh $CONFIG/nics-compute.yaml.template)

  # ensure stack user on Undercloud machine has an ssh key
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"

  if [ "$virtual" == "TRUE" ]; then

    # copy the Undercloud VM's stack user's pub key to
    # root's auth keys so that Undercloud can control
    # vm power on the hypervisor
    ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys

    DEPLOY_OPTIONS+=" --libvirt-type qemu"
    INSTACKENV=$CONFIG/instackenv-virt.json

    # upload instackenv file to Undercloud for virtual deployment
    scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json

  # allow stack to control power management on the hypervisor via sshkey
  # only if this is a virtual deployment
  if [ "$virtual" == "TRUE" ]; then
    # the escaped \n handling below folds the private key onto one line so
    # sed can splice it into the INSERT_STACK_USER_PRIV_KEY placeholder
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
while read -r line; do
  stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json

  # copy stack's ssh key to this users authorized keys
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys

  # disable requiretty for sudo
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers

  # configure undercloud on Undercloud VM
  echo "Running undercloud configuration."
  echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
  sed -i 's/#local_ip/local_ip/' undercloud.conf
  sed -i 's/#network_gateway/network_gateway/' undercloud.conf
  sed -i 's/#network_cidr/network_cidr/' undercloud.conf
  sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
  sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
  sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
  sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf

  openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
  openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
  openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
  openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
  openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
  openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
  openstack-config --set undercloud.conf DEFAULT undercloud_debug false

sudo sed -i '/CephClusterFSID:/c\\  CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\  CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\  CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml

# we assume that packages will not need to be updated with undercloud install
# and that it will be used only to configure the undercloud
# packages updates would need to be handled manually with yum update
sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null

openstack undercloud install &> apex-undercloud-install.log || {
    # cat the undercloud install log incase it fails
    echo "ERROR: openstack undercloud install has failed. Dumping Log:"
    cat apex-undercloud-install.log

sudo systemctl restart openstack-glance-api
sudo systemctl restart openstack-nova-conductor
sudo systemctl restart openstack-nova-compute

# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
# TODO: revisit and file a bug if necessary. This should eventually be removed
# as well as glance api problem
echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
724 ##preping it for deployment and launch the deploy
726 function undercloud_prep_overcloud_deploy {
727 if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
728 if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then
729 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
730 elif [ "${deploy_options_array['sfc']}" == 'true' ]; then
731 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
732 elif [ "${deploy_options_array['vpn']}" == 'true' ]; then
733 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
735 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
737 SDN_IMAGE=opendaylight
738 if [ "${deploy_options_array['sfc']}" == 'true' ]; then
740 if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
741 echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
742 echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
746 elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
747 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
748 SDN_IMAGE=opendaylight
749 elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
750 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
752 elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
753 echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
755 elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'false' ]]; then
756 echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
757 SDN_IMAGE=opendaylight
759 echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
760 echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, false, or null${reset}"
764 # Make sure the correct overcloud image is available
765 if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
766 echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
767 echo "Both ONOS and OpenDaylight are currently deployed from this image."
768 echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
772 echo "Copying overcloud image to Undercloud"
773 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
774 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
776 # make sure ceph is installed
777 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
779 # scale compute nodes according to inventory
780 total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
782 # check if HA is enabled
783 if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ]]; then
784 DEPLOY_OPTIONS+=" --control-scale 3"
785 compute_nodes=$((total_nodes - 3))
786 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
788 compute_nodes=$((total_nodes - 1))
791 if [ "$compute_nodes" -le 0 ]; then
792 echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
795 echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
796 DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
799 if [[ "$net_isolation_enabled" == "TRUE" ]]; then
800 #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
801 DEPLOY_OPTIONS+=" -e network-environment.yaml"
804 if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
805 DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
808 if [[ ! "$virtual" == "TRUE" ]]; then
809 DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
812 DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
814 echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
# Prepare the undercloud over SSH: customize the overcloud image,
# upload glance images, register baremetal nodes, and (re)create nova
# flavors. In this heredoc, unescaped variables expand locally before
# transmission; backslash-escaped ones expand on the undercloud.
816 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
# Debug runs bake a known root password into the overcloud image so
# node consoles are accessible (debug only - this password is public).
817 if [ "$debug" == 'TRUE' ]; then
818 LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
823 echo "Uploading overcloud glance images"
824 openstack overcloud image upload
825 echo "Configuring undercloud and discovering nodes"
826 openstack baremetal import --json instackenv.json
827 openstack baremetal configure boot
# Introspection is currently disabled; kept commented for reference.
828 #if [[ -z "$virtual" ]]; then
829 # openstack baremetal introspection bulk start
831 echo "Configuring flavors"
# Recreate each required flavor from scratch so stale definitions from
# a previous run cannot leak into this deployment.
832 for flavor in baremetal control compute; do
833 echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
# NOTE(review): unanchored grep can match a flavor whose name merely
# contains the target string; 'grep -qw' would be stricter.
834 if openstack flavor list | grep \${flavor}; then
835 openstack flavor delete \${flavor}
837 openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
838 if ! openstack flavor list | grep \${flavor}; then
839 echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
# Tag flavors so nova scheduling matches nodes to overcloud roles.
842 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
843 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
844 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
845 echo "Configuring nameserver on ctlplane network"
846 neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
847 echo "Executing overcloud deployment, this should run for an extended period without output."
848 sleep 60 #wait for Hypervisor stats to check-in to nova
849 # save deploy command so it can be used for debugging
850 cat > deploy_command << EOF
851 openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
# In interactive mode, give the operator a chance to abort before the
# long-running overcloud deployment actually starts.
855 if [ "$interactive" == "TRUE" ]; then
856 if ! prompt_user "Overcloud Deployment"; then
857 echo -e "${blue}INFO: User requests exit${reset}"
# Run the overcloud deployment on the undercloud. On heat stack
# failure, the local debug_stack function definition is injected into
# the remote script (unescaped typeset expands locally) for diagnosis.
862 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
864 openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
865 if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
866 $(typeset -f debug_stack)
# With --debug, dump identity-service and quota state from the freshly
# deployed overcloud for post-deployment troubleshooting.
872 if [ "$debug" == 'TRUE' ]; then
873 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
875 echo "Keystone Endpoint List:"
876 keystone endpoint-list
877 echo "Keystone Service List"
878 keystone service-list
879 cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
884 ##Post configuration after install
# Post-install configuration: create the Neutron external network on
# the overcloud, ensure the local OVS bridges carry usable IPs, and for
# virtual deployments NAT the public network through the undercloud.
# NOTE(review): this function's closing brace lies outside the excerpt.
886 function configure_post_install {
887 local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
# Only these networks need an IP configured on their local OVS bridge.
888 opnfv_attach_networks="admin_network public_network"
890 echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
# Create the external net/subnet on the overcloud under the 'service'
# tenant, using the public network settings parsed earlier (allocation
# pool split from the comma-separated floating IP range).
892 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
895 echo "Configuring Neutron external network"
896 neutron net-create external --router:external=True --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }')
897 neutron subnet-create --name external-net --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
900 echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
901 for network in ${opnfv_attach_networks}; do
902 ovs_ip=$(find_ip ${NET_MAP[$network]})
904 if [ -n "$ovs_ip" ]; then
905 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
907 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
908 # use last IP of allocation pool
# Indirect expansion via eval: reads e.g. admin_network_usable_ip_range
# and admin_network_cidr as populated by the settings parser.
909 eval "ip_range=\${${network}_usable_ip_range}"
910 ovs_ip=${ip_range##*,}
911 eval "net_cidr=\${${network}_cidr}"
# Attach the chosen IP with the CIDR's prefix length, bring the bridge
# up, then re-check that the address actually stuck.
912 sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
913 sudo ip link set up ${NET_MAP[$network]}
914 tmp_ip=$(find_ip ${NET_MAP[$network]})
915 if [ -n "$tmp_ip" ]; then
916 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
919 echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
925 # for virtual, we NAT public network through Undercloud
926 if [ "$virtual" == "TRUE" ]; then
927 if ! configure_undercloud_nat ${public_network_cidr}; then
928 echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
931 echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
935 # for sfc deployments we need the vxlan workaround
# SFC only: bring up br-int and add a static route on every overcloud
# node, with node IPs scraped from the undercloud's nova list.
# NOTE(review): route 123.123.123.0/24 is a hard-coded workaround value.
936 if [ "${deploy_options_array['sfc']}" == 'true' ]; then
937 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
940 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
941 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
942 sudo ifconfig br-int up
943 sudo ip route add 123.123.123.0/24 dev br-int
949 # Collect deployment logs
# Copy /var/log/messages from every overcloud node into ~/deploy_logs
# on the undercloud (dumping it to stdout when --debug is set), then
# delete the temporary copy from each node. Finally print the Horizon
# dashboard URL derived from the heat PublicVip output.
950 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
951 mkdir -p ~/deploy_logs
955 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
956 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
957 sudo cp /var/log/messages /home/heat-admin/messages.log
958 sudo chown heat-admin /home/heat-admin/messages.log
960 scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
961 if [ "$debug" == "TRUE" ]; then
962 nova list --ip \$node
963 echo "---------------------------"
964 echo "-----/var/log/messages-----"
965 echo "---------------------------"
966 cat ~/deploy_logs/\$node.messages.log
967 echo "---------------------------"
968 echo "----------END LOG----------"
969 echo "---------------------------"
971 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
972 sudo rm -f /home/heat-admin/messages.log
976 # Print out the dashboard URL
978 echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
# Usage/help text body (the enclosing function's definition lines fall
# outside this excerpt).
984 echo -e "Usage:\n$0 [arguments] \n"
985 echo -e "   -c|--config : Directory to configuration files. Optional.  Defaults to /var/opt/opnfv/ \n"
986 echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional.  Defaults to null \n"
987 echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal \n"
988 echo -e "   -n|--net-settings : Full path to network settings file. Optional. \n"
989 echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8 \n"
990 echo -e "   -r|--resources : Directory to deployment resources. Optional.  Defaults to /var/opt/opnfv/stack \n"
991 echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
992 echo -e "   --no-ha : disable High Availability deployment scheme, this assumes a single controller and single compute node \n"
993 echo -e "   --flat : disable Network Isolation and use a single flat network for the underlay network.\n"
994 echo -e "   --no-post-config : disable Post Install configuration."
995 echo -e "   --debug : enable debug output."
996 echo -e "   --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
999 ##translates the command line parameters into variables
1000 ##params: $@ the entire command line is passed
1001 ##usage: parse_cmd_line() "$@"
1003 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
1004 echo "Use -h to display help"
# Consume option flags while the next argument begins with '-'. Most of
# the case arms (and their shifts) are elided in this excerpt.
1007 while [ "${1:0:1}" = "-" ]
1016 echo "Deployment Configuration Directory Overridden to: $2"
1019 -d|--deploy-settings)
1020 DEPLOY_SETTINGS_FILE=$2
1021 echo "Deployment Configuration file: $2"
1030 echo "Network Settings Configuration file: $2"
1035 echo "Using $2 as the ping site"
1040 echo "Deployment Resources Directory Overridden to: $2"
1045 echo "Executing a Virtual Deployment"
1051 echo "HA Deployment Disabled"
# --flat: turn off network isolation entirely (checked later to skip
# post-install configuration).
1055 net_isolation_enabled="FALSE"
1056 echo "Underlay Network Isolation Disabled: using flat configuration"
1061 echo "Post install configuration disabled"
1066 echo "Enable debug output"
1071 echo "Interactive mode enabled"
# Sanity-check the parsed CLI combination before doing any real work.
# A network settings file is required unless --flat was requested.
1081 if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
1082 echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
1083 elif [[ -z "$NETSETS" ]]; then
1084 echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
# Virtual deployments fabricate their own nodes; supplying an inventory
# alongside -v is contradictory.
1088 if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
1089 echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
# Deploy settings are mandatory and must exist on disk.
1093 if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
1094 echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
# A provided network settings file must also exist on disk.
1098 if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
1099 echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
# A provided inventory file must exist on disk.
1103 if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
# Fix: the color variable was mistyped as "{$red}", which printed the
# literal "{...}" text instead of colorizing the message like every
# sibling validation check in this section does with "${red}".
1104 echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
# Baremetal deployments cannot proceed without an inventory file.
1108 if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
1109 echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
# Post-install configuration relies on network isolation; skip it when
# --flat was requested.
1113 if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
1114 echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
1119 # Do this after cli parse so that $CONFIG is set properly
# Pull in shared helper libraries from the configuration directory
# chosen on the command line. NOTE(review): presumably these supply
# helpers used here (find_ip, configure_deps, onos_update_gw_mac, ...)
# - verify against the library files themselves.
1120 source $CONFIG/lib/common-functions.sh
1121 source $CONFIG/lib/utility-functions.sh
1122 source $CONFIG/lib/installer/onos/onos_gw_mac_update.sh
# Top-level deployment sequence (several steps between these lines are
# elided in this excerpt).
1130 echo -e "${blue}INFO: Parsing network settings file...${reset}"
1131 parse_network_settings
# Host dependency validation must succeed before any provisioning.
1132 if ! configure_deps; then
1133 echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
1136 if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
1137 parse_deploy_settings
# Virtual deployments fabricate baremetal VMs; otherwise the provided
# inventory file is parsed for real nodes.
1140 if [ "$virtual" == "TRUE" ]; then
1141 setup_virtual_baremetal
1142 elif [ -n "$INVENTORY_FILE" ]; then
1143 parse_inventory_file
1145 configure_undercloud
1146 undercloud_prep_overcloud_deploy
# Optional post-install configuration (skipped with --no-post-config
# or --flat).
1147 if [ "$post_config" == "TRUE" ]; then
1148 if ! configure_post_install; then
1149 echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
1152 echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
# ONOS deployments additionally need the external gateway MAC pushed
# to the controller.
1155 if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
1156 if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
1157 echo -e "${red}ERROR:ONOS Post Install Configuration Failed, Exiting.${reset}"
1160 echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"