##############################################################################
# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Deploy script to install provisioning server for OPNFV Apex
# author: Dan Radez (dradez@redhat.com)
# author: Tim Rozet (trozet@redhat.com)

# Based on RDO Manager http://www.rdoproject.org

reset=$(tput sgr0 || echo "")
blue=$(tput setaf 4 || echo "")
red=$(tput setaf 1 || echo "")
green=$(tput setaf 2 || echo "")

ntp_server="pool.ntp.org"
net_isolation_enabled="TRUE"

declare -A deploy_options_array

SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)

RESOURCES=${RESOURCES:-'/var/opt/opnfv/images'}
CONFIG=${CONFIG:-'/var/opt/opnfv'}
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
# Netmap used to map networks to OVS bridge names
declare -A NET_MAP
NET_MAP['admin_network']="br-admin"
NET_MAP['private_network']="br-private"
NET_MAP['public_network']="br-public"
NET_MAP['storage_network']="br-storage"
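# (illustrative) a lookup such as ${NET_MAP['admin_network']} yields "br-admin",
# the OVS bridge that configure_deps creates and binds a libvirt network to below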
ext_net_type="interface"

##translates yaml into variables
##params: filename, prefix (ex. "config_")
##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
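## illustrative example (not taken from a real settings file): the YAML
##   global_params:
##     ha_enabled: TRUE
## parsed with prefix "deploy_" emits the shell assignment
##   deploy_global_params_ha_enabled=TRUE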
  local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
  sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
      -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
      indent = length($1)/2;
      for (i in vname) {if (i > indent) {delete vname[i]}}
      vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
      printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);

##checks if prefix exists in string
##params: string, prefix
##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
  if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then

##parses variable from a string with '='
##and removes global prefix
##params: string, prefix
##usage: parse_setting_var 'deploy_myvar=2' 'deploy_'
  if echo $mystr | grep -E "^.+\=" > /dev/null; then
    echo $(echo $mystr | grep -Eo "^.+\=" | tr -d '=' | sed 's/^'"$prefix"'//')

##parses value from a string with '='
##usage: parse_setting_value
parse_setting_value() {
  echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')

##parses network settings yaml into globals
parse_network_settings() {
  if output=$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py parse_net_settings -n $NETSETS -i $net_isolation_enabled); then
    echo -e "${blue}${output}${reset}"

##parses deploy settings yaml into globals and options array
##usage: parse_deploy_settings
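## illustrative mapping (setting names drawn from options consumed later in this script):
##   deploy_global_params_ha_enabled=TRUE               -> $ha_enabled (unless already set on the cmdline)
##   deploy_deploy_options_sdn_controller=opendaylight  -> deploy_options_array['sdn_controller']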
parse_deploy_settings() {
  local global_prefix="deploy_global_params_"
  local options_prefix="deploy_deploy_options_"
  local settings=$(parse_yaml $DEPLOY_SETTINGS_FILE "deploy_")

  for this_setting in $settings; do
    if contains_prefix $this_setting $global_prefix; then
      myvar=$(parse_setting_var $this_setting $global_prefix)
      if [ -z "$myvar" ]; then
        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
      myvalue=$(parse_setting_value $this_setting)
      # Do not override variables set by cmdline
      if [ -z "$(eval echo \$$myvar)" ]; then
        eval "$myvar=\$myvalue"
        echo -e "${blue}Global parameter set: ${myvar}:${myvalue}${reset}"
        echo -e "${blue}Global parameter already set: ${myvar}${reset}"
    elif contains_prefix $this_setting $options_prefix; then
      myvar=$(parse_setting_var $this_setting $options_prefix)
      if [ -z "$myvar" ]; then
        echo -e "${red}ERROR: while parsing ${DEPLOY_SETTINGS_FILE} for setting: ${this_setting}${reset}"
      myvalue=$(parse_setting_value $this_setting)
      deploy_options_array[$myvar]=$myvalue
      echo -e "${blue}Deploy option set: ${myvar}:${myvalue}${reset}"

##parses baremetal yaml settings into compatible json
##writes the json to $CONFIG/instackenv_tmp.json
##usage: parse_inventory_file
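## illustrative inventory fragment (field names inferred from the values consumed below,
## example values are hypothetical):
## nodes:
##   node1:
##     mac_address: "aa:bb:cc:dd:ee:ff"
##     ipmi_ip: 192.0.2.10
##     ipmi_user: admin
##     ipmi_pass: password
##     pm_type: pxe_ipmitool
##     cpus: 2
##     memory: 8192
##     disk: 40
##     arch: x86_64
##     capabilities: "profile:control"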
parse_inventory_file() {
  local inventory=$(parse_yaml $INVENTORY_FILE)
  local node_prefix="node"

  # detect number of nodes
  for entry in $inventory; do
    if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
      this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
      if [[ "$inventory_list" != *"$this_node"* ]]; then
        inventory_list+="$this_node "

  inventory_list=$(echo $inventory_list | sed 's/ $//')

  for node in $inventory_list; do
  node_total=$node_count
  if [[ "$node_total" -lt 5 && ( "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ) ]]; then
    echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
  elif [[ "$node_total" -lt 2 ]]; then
    echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"
  eval $(parse_yaml $INVENTORY_FILE) || {
    echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"

  for node in $inventory_list; do
      \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
      \"pm_type\": \"$(eval echo \${${node}pm_type})\",
        \"$(eval echo \${${node}mac_address})\"
      \"cpu\": \"$(eval echo \${${node}cpus})\",
      \"memory\": \"$(eval echo \${${node}memory})\",
      \"disk\": \"$(eval echo \${${node}disk})\",
      \"arch\": \"$(eval echo \${${node}arch})\",
      \"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
      \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
      \"capabilities\": \"$(eval echo \${${node}capabilities})\"
    instackenv_output+=${node_output}
    if [ $node_count -lt $node_total ]; then
      instackenv_output+=" },"
      instackenv_output+=" }"

  #Copy instackenv.json to undercloud for baremetal
  echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat > instackenv.json << EOF
##verify internet connectivity
function verify_internet {
  if ping -c 2 $ping_site > /dev/null; then
    if ping -c 2 www.google.com > /dev/null; then
      echo "${blue}Internet connectivity detected${reset}"
      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
    echo "${red}No internet connectivity detected${reset}"

##download dependencies if missing and configure host
function configure_deps {
  if ! verify_internet; then
    echo "${red}Will not download dependencies${reset}"

  # verify ip forwarding
  if sysctl net.ipv4.ip_forward | grep 0; then
    sudo sysctl -w net.ipv4.ip_forward=1
    sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"

  # ensure no dhcp server is running on jumphost
  if ! sudo systemctl status dhcpd | grep dead; then
    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
    sudo systemctl stop dhcpd
    sudo systemctl disable dhcpd

  # ensure networks are configured
  systemctl status libvirtd || systemctl start libvirtd
  systemctl status openvswitch || systemctl start openvswitch

  # If flat we only use admin network
  if [[ "$net_isolation_enabled" == "FALSE" ]]; then
    virsh_enabled_networks="admin_network"
    enabled_network_list="admin_network"
  # For baremetal we only need to create/attach Undercloud to admin and public
  elif [ "$virtual" == "FALSE" ]; then
    virsh_enabled_networks="admin_network public_network"
    virsh_enabled_networks=$enabled_network_list

  # ensure default network is configured correctly
  libvirt_dir="/usr/share/libvirt/networks"
  virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
  virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
  virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default

  for network in ${OPNFV_NETWORK_TYPES}; do
    echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
    ovs-vsctl list-br | grep ${NET_MAP[$network]} > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
    virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
  <name>$network</name>
  <forward mode='bridge'/>
  <bridge name='${NET_MAP[$network]}'/>
  <virtualport type='openvswitch'/>
    if ! (virsh net-list --all | grep $network > /dev/null); then
      echo "${red}ERROR: unable to create network: ${network}${reset}"
    rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
    virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
    virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network

  echo -e "${blue}INFO: Bridges set: ${reset}"
  echo -e "${blue}INFO: virsh networks set: ${reset}"
  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
    # bridge interfaces to correct OVS instances for baremetal deployment
    for network in ${enabled_network_list}; do
      if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
      this_interface=$(eval echo \${${network}_bridged_interface})
      # check if this is a bridged interface for this network
      if [[ ! -z "$this_interface" && "$this_interface" != "none" ]]; then
        if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
          echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
          echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
  # ensure storage pool exists and is started
  virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
  virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)

  if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
    echo -e "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
Are you sure you have enabled vmx in your bios or hypervisor?${reset}"

  if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
  if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi

  if ! lsmod | grep kvm > /dev/null; then
    echo "${red}kvm kernel modules not loaded!${reset}"

  if [ ! -e ~/.ssh/id_rsa.pub ]; then
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa

  echo "${blue}All dependencies installed and running${reset}"
##verify vm exists, and has a dhcp lease assigned to it
function setup_undercloud_vm {
  if ! virsh list --all | grep undercloud > /dev/null; then
    undercloud_nets="default admin_network"
    if [[ $enabled_network_list =~ "public_network" ]]; then
      undercloud_nets+=" public_network"
    define_vm undercloud hd 30 "$undercloud_nets"
    ### virsh vol-upload intermittently fails with hangup/keepalive errors (log excerpt below), so cp is used instead
    #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
    #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
    #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #error: cannot close volume undercloud.qcow2
    #error: internal error: received hangup / error event on socket
    #error: Reconnected to the hypervisor
    local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
    cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
    # resize Undercloud machine
    echo "Checking if Undercloud needs to be resized..."
    undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
    if [ "$undercloud_size" -lt 30 ]; then
      qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
      LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
      LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
      new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
      if [ "$new_size" -lt 30 ]; then
        echo "Error resizing Undercloud machine, disk size is ${new_size}"
        echo "Undercloud successfully resized"
      echo "Skipped Undercloud resize, upstream is large enough"

    echo "Found Undercloud VM, using existing VM"

  # if the VM is not running update the authkeys and start it
  if ! virsh list | grep undercloud > /dev/null; then
    echo "Injecting ssh key to Undercloud VM"
    LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
        --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
        --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
    virsh start undercloud

  sleep 10 # let undercloud get started up
  # get the undercloud VM IP
  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
  while ! arp -e | grep ${undercloud_mac} > /dev/null && [ $CNT -gt 0 ]; do

  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})

  if [ -z "$UNDERCLOUD" ]; then
    echo -e "\n\nCan't get IP for Undercloud. Cannot continue."
    echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to contact Undercloud. Cannot continue"
  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null 2>&1 && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to connect to Undercloud. Cannot continue"

  # extra space to overwrite the previous connectivity output
  echo -e "${blue}\r ${reset}"
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"

  # ssh key fix for stack user
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"

##Create virtual nodes in virsh
function setup_virtual_baremetal {
  #start by generating the opening json for instackenv.json
  cat > $CONFIG/instackenv-virt.json << EOF

  # next create the virtual machines and add their definitions to the file
  for i in $(seq 0 $vm_index); do
    if ! virsh list --all | grep baremetal${i} > /dev/null; then
      define_vm baremetal${i} network 41 'admin_network'
      for n in private_network public_network storage_network; do
        if [[ $enabled_network_list =~ $n ]]; then
          virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config
      echo "Found Baremetal ${i} VM, using existing VM"
    #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
    mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')

    cat >> $CONFIG/instackenv-virt.json << EOF
      "pm_addr": "192.168.122.1",
      "pm_password": "INSERT_STACK_USER_PRIV_KEY",
      "pm_type": "pxe_ssh",

  #truncate the last line to remove the comma behind the bracket
  tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
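  # (tail -n 1 | wc -c yields the byte count of that final line, and truncate -s -N
  #  then drops exactly that many bytes from the end of the file)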
  #finally reclose the bracket and close the instackenv.json file
  cat >> $CONFIG/instackenv-virt.json << EOF
  "host-ip": "192.168.122.1",
  "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",

##Define a single virtual machine in virsh
##params: name - String: libvirt name for VM
##        bootdev - String: boot device for the VM
##        disksize - Number: size of the disk in GB
##        ovs_bridges: - List: list of ovs bridges
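##usage (example drawn from the setup_undercloud_vm call above): define_vm undercloud hd 30 "default admin_network"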
function define_vm () {
  # Create the libvirt storage volume
  if virsh vol-list default | grep ${1}.qcow2 > /dev/null 2>&1; then
    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
    virsh vol-delete ${1}.qcow2 --pool default

  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
  if [ ! -f $volume_path ]; then
    echo "$volume_path Not created successfully... Aborting"
  /usr/libexec/openstack-tripleo/configure-vm --name $1 \
      --image "$volume_path" \
      --libvirt-nic-driver virtio \
      --baremetal-interface $4

##Set network-environment settings
##params: network-environment file to edit
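## e.g. the sed edits below rewrite template lines such as
##   ControlPlaneSubnetCidr: "24"
##   ExternalAllocationPools: [{'start': '...', 'end': '...'}]
## (illustrative values; the real ones come from the parsed network settings)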
function configure_network_environment {
  tht_dir=/usr/share/openstack-tripleo-heat-templates/network

  sed -i '/ControlPlaneSubnetCidr/c\\ ControlPlaneSubnetCidr: "'${admin_network_cidr##*/}'"' $1
  sed -i '/ControlPlaneDefaultRoute/c\\ ControlPlaneDefaultRoute: '${admin_network_provisioner_ip}'' $1
  sed -i '/ExternalNetCidr/c\\ ExternalNetCidr: '${public_network_cidr}'' $1
  sed -i "/ExternalAllocationPools/c\\ ExternalAllocationPools: [{'start': '${public_network_usable_ip_range%%,*}', 'end': '${public_network_usable_ip_range##*,}'}]" $1
  sed -i '/ExternalInterfaceDefaultRoute/c\\ ExternalInterfaceDefaultRoute: '${public_network_gateway}'' $1
  sed -i '/EC2MetadataIp/c\\ EC2MetadataIp: '${admin_network_provisioner_ip}'' $1

  # check for private network
  if [[ ! -z "$private_network_enabled" && "$private_network_enabled" == "True" ]]; then
    sed -i 's#^.*Network::Tenant.*$# OS::TripleO::Network::Tenant: '${tht_dir}'/tenant.yaml#' $1
    sed -i 's#^.*Controller::Ports::TenantPort:.*$# OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
    sed -i 's#^.*Compute::Ports::TenantPort:.*$# OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
    sed -i "/TenantAllocationPools/c\\ TenantAllocationPools: [{'start': '${private_network_usable_ip_range%%,*}', 'end': '${private_network_usable_ip_range##*,}'}]" $1
    sed -i '/TenantNetCidr/c\\ TenantNetCidr: '${private_network_cidr}'' $1
    sed -i 's#^.*Network::Tenant.*$# OS::TripleO::Network::Tenant: '${tht_dir}'/noop.yaml#' $1
    sed -i 's#^.*Controller::Ports::TenantPort:.*$# OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Compute::Ports::TenantPort:.*$# OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1

  # check for storage network
  if [[ ! -z "$storage_network_enabled" && "$storage_network_enabled" == "True" ]]; then
    sed -i 's#^.*Network::Storage:.*$# OS::TripleO::Network::Storage: '${tht_dir}'/storage.yaml#' $1
    sed -i 's#^.*Network::Ports::StorageVipPort:.*$# OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i 's#^.*Controller::Ports::StoragePort:.*$# OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i 's#^.*Compute::Ports::StoragePort:.*$# OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
    sed -i "/StorageAllocationPools/c\\ StorageAllocationPools: [{'start': '${storage_network_usable_ip_range%%,*}', 'end': '${storage_network_usable_ip_range##*,}'}]" $1
    sed -i '/StorageNetCidr/c\\ StorageNetCidr: '${storage_network_cidr}'' $1
    sed -i 's#^.*Network::Storage:.*$# OS::TripleO::Network::Storage: '${tht_dir}'/noop.yaml#' $1
    sed -i 's#^.*Network::Ports::StorageVipPort:.*$# OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Controller::Ports::StoragePort:.*$# OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1
    sed -i 's#^.*Compute::Ports::StoragePort:.*$# OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1

  if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then

##Copy over the glance images and instackenv json file
function configure_undercloud {
  echo "Copying configuration files to Undercloud"
  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
    configure_network_environment $CONFIG/network-environment.yaml
    echo -e "${blue}Network Environment set for Deployment: ${reset}"
    cat $CONFIG/network-environment.yaml
    scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
cat > nics/controller.yaml << EOF
$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py nic_template -d $CONFIG -f nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family)
cat > nics/compute.yaml << EOF
$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py nic_template -d $CONFIG -f nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family)

  # ensure stack user on Undercloud machine has an ssh key
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"

  if [ "$virtual" == "TRUE" ]; then
    # copy the Undercloud VM's stack user's pub key to
    # root's auth keys so that Undercloud can control
    # vm power on the hypervisor
    ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys

    DEPLOY_OPTIONS+=" --libvirt-type qemu"
    INSTACKENV=$CONFIG/instackenv-virt.json

    # upload instackenv file to Undercloud for virtual deployment
    scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json

  # allow stack to control power management on the hypervisor via sshkey
  # only if this is a virtual deployment
  if [ "$virtual" == "TRUE" ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
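# flatten the stack user's private key onto one line (with literal "\n" separators)
# so the sed below can splice it into instackenv.json wherever the
# INSERT_STACK_USER_PRIV_KEY placeholder appears (the pm_password / ssh-key fields)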
while read -r line; do
  stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json

  # copy stack's ssh key to this user's authorized keys
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys

  # disable requiretty for sudo
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers

  # configure undercloud on Undercloud VM
  echo "Running undercloud configuration."
  echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
  sed -i 's/#local_ip/local_ip/' undercloud.conf
  sed -i 's/#network_gateway/network_gateway/' undercloud.conf
  sed -i 's/#network_cidr/network_cidr/' undercloud.conf
  sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
  sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
  sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
  sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf

  openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
  openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
  openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
  openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
  openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
  openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
  openstack-config --set undercloud.conf DEFAULT undercloud_debug false

sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
# we assume that packages will not need to be updated with undercloud install
# and that it will be used only to configure the undercloud
# package updates would need to be handled manually with yum update
sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null

openstack undercloud install &> apex-undercloud-install.log || {
    # cat the undercloud install log in case it fails
    echo "ERROR: openstack undercloud install has failed. Dumping Log:"
    cat apex-undercloud-install.log
sudo systemctl restart openstack-glance-api
sudo systemctl restart openstack-nova-conductor
sudo systemctl restart openstack-nova-compute

# WORKAROUND: the services above must be restarted to work around a sync problem with the
# nova compute manager and a glance api issue.
# TODO: revisit and file a bug if necessary. This should eventually be removed.
echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
##prepping it for deployment and launching the deploy
function undercloud_prep_overcloud_deploy {
  if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
    if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
    elif [ "${deploy_options_array['sfc']}" == 'true' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
    elif [ "${deploy_options_array['vpn']}" == 'true' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
    SDN_IMAGE=opendaylight
    if [ "${deploy_options_array['sfc']}" == 'true' ]; then
      if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
        echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
        echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
  elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
    SDN_IMAGE=opendaylight
  elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
  elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
    echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
  elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'false' ]]; then
    echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
    SDN_IMAGE=opendaylight
    echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
    echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, false, or null${reset}"

  # Make sure the correct overcloud image is available
  if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
    echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
    echo "Both ONOS and OpenDaylight are currently deployed from this image."
    echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"

  echo "Copying overcloud image to Undercloud"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2

  # make sure ceph is installed
  DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"

  # scale compute nodes according to inventory
  total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")

  # check if HA is enabled
  if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ]]; then
    DEPLOY_OPTIONS+=" --control-scale 3"
    compute_nodes=$((total_nodes - 3))
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
    compute_nodes=$((total_nodes - 1))

  if [ "$compute_nodes" -le 0 ]; then
    echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"

  echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
  DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
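  # e.g. (illustrative) a 5-node HA inventory yields "--control-scale 3 --compute-scale 2",
  # while a 2-node non-HA inventory yields "--compute-scale 1"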
  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
    #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
    DEPLOY_OPTIONS+=" -e network-environment.yaml"

  if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
    DEPLOY_OPTIONS+=" --ntp-server $ntp_server"

  if [[ ! "$virtual" == "TRUE" ]]; then
    DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"

  DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"

  echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"

  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
if [ "$debug" == 'TRUE' ]; then
    LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex

echo "Uploading overcloud glance images"
openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
openstack baremetal configure boot
#if [[ -z "$virtual" ]]; then
#  openstack baremetal introspection bulk start
echo "Configuring flavors"
for flavor in baremetal control compute; do
  echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
  if openstack flavor list | grep \${flavor}; then
    openstack flavor delete \${flavor}
  openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
  if ! openstack flavor list | grep \${flavor}; then
    echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
echo "Configuring nameserver on ctlplane network"
neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
echo "Executing overcloud deployment; this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
# save deploy command so it can be used for debugging
cat > deploy_command << EOF
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90

  if [ "$interactive" == "TRUE" ]; then
    if ! prompt_user "Overcloud Deployment"; then
      echo -e "${blue}INFO: User requests exit${reset}"

  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
  $(typeset -f debug_stack)

  if [ "$debug" == 'TRUE' ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
echo "Keystone Endpoint List:"
keystone endpoint-list
echo "Keystone Service List:"
keystone service-list
cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})

##Post configuration after install
function configure_post_install {
  local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
  opnfv_attach_networks="admin_network public_network"

  echo -e "${blue}INFO: Post Install Configuration Running...${reset}"

  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
echo "Configuring Neutron external network"
neutron net-create external --router:external=True --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }')
neutron subnet-create --name external-net --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}

  echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
  for network in ${opnfv_attach_networks}; do
    ovs_ip=$(find_ip ${NET_MAP[$network]})
    if [ -n "$ovs_ip" ]; then
      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
      # use last IP of allocation pool
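      # e.g. (illustrative) a usable_ip_range of "192.168.37.10,192.168.37.199"
      # would put 192.168.37.199 on the bridge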
      eval "ip_range=\${${network}_usable_ip_range}"
      ovs_ip=${ip_range##*,}
      eval "net_cidr=\${${network}_cidr}"
      sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
      sudo ip link set up ${NET_MAP[$network]}
      tmp_ip=$(find_ip ${NET_MAP[$network]})
      if [ -n "$tmp_ip" ]; then
        echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
        echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"

  # for virtual, we NAT public network through Undercloud
  if [ "$virtual" == "TRUE" ]; then
    if ! configure_undercloud_nat ${public_network_cidr}; then
      echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
      echo -e "${blue}INFO: Undercloud VM has been set up to NAT Overcloud public network${reset}"

  # for sfc deployments we need the vxlan workaround
  if [ "${deploy_options_array['sfc']}" == 'true' ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
  ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
sudo ifconfig br-int up
sudo ip route add 123.123.123.0/24 dev br-int

  # Collect deployment logs
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
mkdir -p ~/deploy_logs
for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
  ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
sudo cp /var/log/messages /home/heat-admin/messages.log
sudo chown heat-admin /home/heat-admin/messages.log
  scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
  if [ "$debug" == "TRUE" ]; then
    nova list --ip \$node
    echo "---------------------------"
    echo "-----/var/log/messages-----"
    echo "---------------------------"
    cat ~/deploy_logs/\$node.messages.log
    echo "---------------------------"
    echo "----------END LOG----------"
    echo "---------------------------"
  ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
sudo rm -f /home/heat-admin/messages.log

# Print out the dashboard URL
echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
  echo -e "Usage:\n$0 [arguments] \n"
  echo -e " -c|--config : Directory to configuration files. Optional. Defaults to /var/opt/opnfv/ \n"
  echo -e " -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null \n"
  echo -e " -i|--inventory : Full path to inventory yaml file. Required only for baremetal \n"
  echo -e " -n|--net-settings : Full path to network settings file. Optional. \n"
  echo -e " -p|--ping-site : Site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8 \n"
  echo -e " -r|--resources : Directory to deployment resources. Optional. Defaults to /var/opt/opnfv/images \n"
  echo -e " -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
  echo -e " --no-ha : Disable High Availability deployment scheme; this assumes a single controller and single compute node \n"
  echo -e " --flat : Disable Network Isolation and use a single flat network for the underlay network.\n"
  echo -e " --no-post-config : Disable Post Install configuration."
  echo -e " --debug : Enable debug output."
  echo -e " --interactive : Enable interactive deployment mode which requires user to confirm steps of deployment."
##translates the command line parameters into variables
##params: $@ the entire command line is passed
##usage: parse_cmd_line "$@"
  echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
  echo "Use -h to display help"

  while [ "${1:0:1}" = "-" ]
        echo "Deployment Configuration Directory Overridden to: $2"
    -d|--deploy-settings)
        DEPLOY_SETTINGS_FILE=$2
        echo "Deployment Configuration file: $2"
        echo "Network Settings Configuration file: $2"
        echo "Using $2 as the ping site"
        echo "Deployment Resources Directory Overridden to: $2"
        echo "Executing a Virtual Deployment"
        echo "HA Deployment Disabled"
        net_isolation_enabled="FALSE"
        echo "Underlay Network Isolation Disabled: using flat configuration"
        echo "Post install configuration disabled"
        echo "Debug output enabled"
        echo "Interactive mode enabled"
  if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
    echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
  elif [[ -z "$NETSETS" ]]; then
    echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"

  if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"

  if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
    echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"

  if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
    echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"

  if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"

  if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"

  if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
    echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
# Do this after cli parse so that $CONFIG is set properly
source $CONFIG/lib/common-functions.sh
source $CONFIG/lib/utility-functions.sh
source $CONFIG/lib/installer/onos/onos_gw_mac_update.sh

# Make sure jinja2 is installed
easy_install-3.4 jinja2 > /dev/null

echo -e "${blue}INFO: Parsing network settings file...${reset}"
parse_network_settings

if ! configure_deps; then
  echo -e "${red}Dependency Validation Failed, Exiting.${reset}"

if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
  parse_deploy_settings

if [ "$virtual" == "TRUE" ]; then
  setup_virtual_baremetal
elif [ -n "$INVENTORY_FILE" ]; then
  parse_inventory_file

configure_undercloud
undercloud_prep_overcloud_deploy

if [ "$post_config" == "TRUE" ]; then
  if ! configure_post_install; then
    echo -e "${red}ERROR: Post Install Configuration Failed, Exiting.${reset}"
    echo -e "${blue}INFO: Post Install Configuration Complete${reset}"

if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
  if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
    echo -e "${red}ERROR: ONOS Post Install Configuration Failed, Exiting.${reset}"
    echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"