2 ##############################################################################
3 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
11 # Deploy script to install provisioning server for OPNFV Apex
12 # author: Dan Radez (dradez@redhat.com)
13 # author: Tim Rozet (trozet@redhat.com)
15 # Based on RDO Manager http://www.rdoproject.org
# Terminal color helpers for status output. When stdout is not a capable
# terminal (cron, CI, redirected logs) tput fails — suppress its stderr
# noise and fall back to empty strings so later echo -e output stays clean.
reset=$(tput sgr0 2>/dev/null || echo "")
blue=$(tput setaf 4 2>/dev/null || echo "")
red=$(tput setaf 1 2>/dev/null || echo "")
green=$(tput setaf 2 2>/dev/null || echo "")
# --- Deployment defaults ---------------------------------------------------
# NTP server passed to the overcloud deploy; network isolation on by default.
ntp_server='pool.ntp.org'
net_isolation_enabled='TRUE'

# Containers for parsed deploy settings and performance tuning options.
declare -A deploy_options_array
declare -a performance_options

# Non-interactive SSH: never record or check host keys, keep output quiet.
SSH_OPTIONS=(
  -o StrictHostKeyChecking=no
  -o GlobalKnownHostsFile=/dev/null
  -o UserKnownHostsFile=/dev/null
  -o LogLevel=error
)

# Install layout; each path may be overridden from the environment.
CONFIG=${CONFIG:-/var/opt/opnfv}
RESOURCES=${RESOURCES:-"${CONFIG}/images"}
LIB=${LIB:-"${CONFIG}/lib"}

# Every network role Apex knows how to isolate.
OPNFV_NETWORK_TYPES='admin_network private_network public_network storage_network api_network'
# Netmap used to map networks to OVS bridge names.
# NET_MAP must be declared associative BEFORE assigning string keys:
# without 'declare -A', bash treats NET_MAP as an indexed array and
# evaluates each quoted key arithmetically to 0, so every assignment
# silently clobbers the previous one.
declare -A NET_MAP
NET_MAP['admin_network']="br-admin"
NET_MAP['private_network']="br-private"
NET_MAP['public_network']="br-public"
NET_MAP['storage_network']="br-storage"
NET_MAP['api_network']="br-api"
# External network attaches via a plain interface by default.
ext_net_type="interface"
# Helper libraries sourced at startup.
# NOTE(review): the opening 'lib_files=(' of this array, its closing
# paren, and the loop's exit/fi/done are not visible in this view of
# the file — confirm against the full source before editing.
$LIB/common-functions.sh
$LIB/utility-functions.sh
$LIB/installer/onos/onos_gw_mac_update.sh
for lib_file in ${lib_files[@]}; do
  # every library must source cleanly; failure is reported in red
  if ! source $lib_file; then
    echo -e "${red}ERROR: Failed to source $lib_file${reset}"
##translates yaml into variables
##params: filename, prefix (ex. "config_")
##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
# NOTE(review): the 'parse_yaml() {' opener, the awk invocation line and
# the closing brace are not visible in this view — confirm before editing.
# $fs is an ASCII FS (0x1c) used as a safe field separator between the
# "indent", "key" and "value" groups captured by the sed expressions.
local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
indent = length($1)/2;
for (i in vname) {if (i > indent) {delete vname[i]}}
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
##checks if prefix exists in string
##params: string, prefix
##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
# NOTE(review): the 'contains_prefix() {' opener, its local variable
# assignments, the true/false returns and the closing brace are not
# visible in this view — confirm against the full source.
# grep -E anchors at ^, so this tests "string starts with prefix".
if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
##parses variable from a string with '='
##and removes global prefix
##params: string, prefix
##usage: parse_setting_var 'deploy_myvar=2' 'deploy_'
parse_setting_var() {
  # NOTE(review): the local assignments of $mystr/$prefix and the
  # closing brace are not visible in this view.
  # Emits only the variable name (text before '='), with $prefix stripped.
  if echo $mystr | grep -E "^.+\=" > /dev/null; then
    echo $(echo $mystr | grep -Eo "^.+\=" | tr -d '=' | sed 's/^'"$prefix"'//')
##parses value from a string with '='
##usage: parse_setting_value
parse_setting_value() {
  # NOTE(review): the local assignment of $mystr and the closing brace
  # are not visible in this view.
  # NOTE(review): 'tr -d' deletes EVERY '=' — a value containing '='
  # would be mangled; likely harmless for current settings, but confirm.
  echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
##parses network settings yaml into globals
parse_network_settings() {
  # Delegates YAML parsing to the apex python helper; on success its
  # stdout (shell variable assignments) is echoed so the operator can
  # see the parsed values.
  # NOTE(review): the eval of $output, the else marker and the
  # exit/closing brace are not visible in this view.
  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-net-settings -s $NETSETS -i $net_isolation_enabled -e $CONFIG/network-environment.yaml); then
    echo -e "${blue}${output}${reset}"
    # (failure path)
    echo -e "${red}ERROR: Failed to parse network settings file $NETSETS ${reset}"
##parses deploy settings yaml into globals
parse_deploy_settings() {
  # Delegates YAML parsing to the apex python helper; its stdout (shell
  # variable assignments) is echoed for the operator's benefit.
  # NOTE(review): the eval of $output, else/exit branches and several
  # closing keywords are not visible in this view.
  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
    echo -e "${blue}${output}${reset}"
    # (failure path)
    echo -e "${red}ERROR: Failed to parse deploy settings file $DEPLOY_SETTINGS_FILE ${reset}"

  # Sanity checks for the OVS-DPDK dataplane: it requires network
  # isolation and an enabled tenant (private) network.
  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
    if [ "$net_isolation_enabled" == "FALSE" ]; then
      echo -e "${red}ERROR: flat network is not supported with ovs-dpdk ${reset}"
    if [[ ! $enabled_network_list =~ "private_network" ]]; then
      echo -e "${red}ERROR: tenant network is not enabled for ovs-dpdk ${reset}"
159 ##parses baremetal yaml settings into compatible json
160 ##writes the json to $CONFIG/instackenv_tmp.json
162 ##usage: parse_inventory_file
163 parse_inventory_file() {
164 local inventory=$(parse_yaml $INVENTORY_FILE)
166 local node_prefix="node"
171 # detect number of nodes
172 for entry in $inventory; do
173 if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
174 this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
175 if [[ "$inventory_list" != *"$this_node"* ]]; then
176 inventory_list+="$this_node "
181 inventory_list=$(echo $inventory_list | sed 's/ $//')
183 for node in $inventory_list; do
187 node_total=$node_count
189 if [[ "$node_total" -lt 5 && "$ha_enabled" == "True" ]]; then
190 echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
192 elif [[ "$node_total" -lt 2 ]]; then
193 echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"
197 eval $(parse_yaml $INVENTORY_FILE) || {
198 echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"
208 for node in $inventory_list; do
212 \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
213 \"pm_type\": \"$(eval echo \${${node}pm_type})\",
215 \"$(eval echo \${${node}mac_address})\"
217 \"cpu\": \"$(eval echo \${${node}cpus})\",
218 \"memory\": \"$(eval echo \${${node}memory})\",
219 \"disk\": \"$(eval echo \${${node}disk})\",
220 \"arch\": \"$(eval echo \${${node}arch})\",
221 \"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
222 \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
223 \"capabilities\": \"$(eval echo \${${node}capabilities})\"
225 instackenv_output+=${node_output}
226 if [ $node_count -lt $node_total ]; then
227 instackenv_output+=" },"
229 instackenv_output+=" }"
237 #Copy instackenv.json to undercloud for baremetal
238 echo -e "{blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
239 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
240 cat > instackenv.json << EOF
##verify internet connectivity
##used as a predicate by configure_deps
function verify_internet {
  # NOTE(review): the return statements, else markers and closing brace
  # are not visible in this view. $ping_site is presumably set elsewhere
  # in the file — confirm.
  if ping -c 2 $ping_site > /dev/null; then
    # raw connectivity works; now confirm DNS resolution
    if ping -c 2 www.google.com > /dev/null; then
      echo "${blue}Internet connectivity detected${reset}"
      # reached when the name-based ping fails but the IP ping worked
      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
      echo "${red}No internet connectivity detected${reset}"
263 ##download dependencies if missing and configure host
265 function configure_deps {
266 if ! verify_internet; then
267 echo "${red}Will not download dependencies${reset}"
271 # verify ip forwarding
272 if sysctl net.ipv4.ip_forward | grep 0; then
273 sudo sysctl -w net.ipv4.ip_forward=1
274 sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
277 # ensure no dhcp server is running on jumphost
278 if ! sudo systemctl status dhcpd | grep dead; then
279 echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
280 sudo systemctl stop dhcpd
281 sudo systemctl disable dhcpd
284 # ensure networks are configured
285 systemctl status libvirtd || systemctl start libvirtd
286 systemctl status openvswitch || systemctl start openvswitch
288 # If flat we only use admin network
289 if [[ "$net_isolation_enabled" == "FALSE" ]]; then
290 virsh_enabled_networks="admin_network"
291 enabled_network_list="admin_network"
292 # For baremetal we only need to create/attach Undercloud to admin and public
293 elif [ "$virtual" == "FALSE" ]; then
294 virsh_enabled_networks="admin_network public_network"
296 virsh_enabled_networks=$enabled_network_list
299 # ensure default network is configured correctly
300 libvirt_dir="/usr/share/libvirt/networks"
301 virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
302 virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
303 virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
305 if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
306 for network in ${enabled_network_list}; do
307 echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
308 ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
309 virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
311 <name>$network</name>
312 <forward mode='bridge'/>
313 <bridge name='${NET_MAP[$network]}'/>
314 <virtualport type='openvswitch'/>
317 if ! (virsh net-list --all | grep $network > /dev/null); then
318 echo "${red}ERROR: unable to create network: ${network}${reset}"
321 rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
322 virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
323 virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
326 echo -e "${blue}INFO: Bridges set: ${reset}"
329 # bridge interfaces to correct OVS instances for baremetal deployment
330 for network in ${enabled_network_list}; do
331 if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
334 this_interface=$(eval echo \${${network}_bridged_interface})
335 # check if this a bridged interface for this network
336 if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
337 if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
338 echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
341 echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
344 echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
349 for network in ${OPNFV_NETWORK_TYPES}; do
350 echo "${blue}INFO: Creating Virsh Network: $network${reset}"
351 virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
353 <name>$network</name>
354 <bridge name='${NET_MAP[$network]}'/>
357 if ! (virsh net-list --all | grep $network > /dev/null); then
358 echo "${red}ERROR: unable to create network: ${network}${reset}"
361 rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
362 virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
363 virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
366 echo -e "${blue}INFO: Bridges set: ${reset}"
370 echo -e "${blue}INFO: virsh networks set: ${reset}"
373 # ensure storage pool exists and is started
374 virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
375 virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
377 if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
378 echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
379 Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
382 if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
383 if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi
385 if ! lsmod | grep kvm > /dev/null; then
386 echo "${red}kvm kernel modules not loaded!${reset}"
391 if [ ! -e ~/.ssh/id_rsa.pub ]; then
392 ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
395 echo "${blue}All dependencies installed and running${reset}"
##verify undercloud vm exists and has a dhcp lease assigned to it
##params: none
function setup_undercloud_vm {
  # NOTE(review): counter initializations, sleep/decrement loop bodies,
  # exits and several closing keywords are not visible in this view.
  if ! virsh list --all | grep undercloud > /dev/null; then
    undercloud_nets="default admin_network"
    if [[ $enabled_network_list =~ "public_network" ]]; then
      undercloud_nets+=" public_network"
    # 30GB disk, 4 vcpus, 12GB RAM
    define_vm undercloud hd 30 "$undercloud_nets" 4 12288

    ### this doesn't work for some reason I was getting hangup events so using cp instead
    #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
    #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
    #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
    #error: cannot close volume undercloud.qcow2
    #error: internal error: received hangup / error event on socket
    #error: Reconnected to the hypervisor
    local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
    cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst

    # resize Undercloud machine
    echo "Checking if Undercloud needs to be resized..."
    undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
    if [ "$undercloud_size" -lt 30 ]; then
      qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
      LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
      LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
      # re-measure to confirm the resize took effect
      new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
      if [ "$new_size" -lt 30 ]; then
        echo "Error resizing Undercloud machine, disk size is ${new_size}"
        echo "Undercloud successfully resized"
      echo "Skipped Undercloud resize, upstream is large enough"
    echo "Found Undercloud VM, using existing VM"

  # if the VM is not running update the authkeys and start it
  if ! virsh list | grep undercloud > /dev/null; then
    echo "Injecting ssh key to Undercloud VM"
    LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
        --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
        --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
    virsh start undercloud

  sleep 10 # let undercloud get started up

  # get the undercloud VM IP
  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
  # NOTE(review): 'while ! $(cmd > /dev/null)' substitutes an empty
  # string, leaving a bare '!' (always status 1) — the loop may never
  # poll as intended; consider 'while ! arp -e | grep -q ...' instead.
  while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})

  if [ -z "$UNDERCLOUD" ]; then
    # NOTE(review): plain echo does not interpret '\n' — was -e intended?
    echo "\n\nCan't get IP for Undercloud. Can Not Continue."
    echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"

  # wait for the VM to answer pings, then for sshd to accept connections
  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to contact Undercloud. Can Not Continue"
  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
  if [ "$CNT" -eq 0 ]; then
    echo "Failed to connect to Undercloud. Can Not Continue"
  # extra space to overwrite the previous connectivity output
  echo -e "${blue}\r ${reset}"

  # add the provisioner public IP to eth2 if it is not already there
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"

  # ssh key fix for stack user
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
##Create virtual nodes in virsh
##params: vcpus, ramsize
function setup_virtual_baremetal {
  # NOTE(review): the defaulting of $1/$2 into vcpus/ramsize and the
  # heredoc bodies/terminators (EOF) are not visible in this view.
  elif [ -z "$2" ]; then
  #start by generating the opening json for instackenv.json
  cat > $CONFIG/instackenv-virt.json << EOF
  # next create the virtual machines and add their definitions to the file
  if [ "$ha_enabled" == "False" ]; then
    # 1 controller + computes
    # zero based so just pass compute count
    vm_index=$VM_COMPUTES
    # 3 controller + computes
    # zero based so add 2 to compute count
    vm_index=$((2+$VM_COMPUTES))

  for i in $(seq 0 $vm_index); do
    if ! virsh list --all | grep baremetal${i} > /dev/null; then
      # 41GB disk on the admin network
      define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
      for n in private_network public_network storage_network api_network; do
        if [[ $enabled_network_list =~ $n ]]; then
          virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
      echo "Found Baremetal ${i} VM, using existing VM"
    #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
    mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')

    # while VM_COMPUTES remains positive the VM gets the compute
    # profile; afterwards the control profile
    if [ "$VM_COMPUTES" -gt 0 ]; then
      capability="profile:compute"
      VM_COMPUTES=$((VM_COMPUTES - 1))
      capability="profile:control"

    cat >> $CONFIG/instackenv-virt.json << EOF
"pm_addr": "192.168.122.1",
"pm_password": "INSERT_STACK_USER_PRIV_KEY",
"pm_type": "pxe_ssh",
"memory": "$ramsize",
"capabilities": "$capability"
  #truncate the last line to remove the comma behind the bracket
  tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
  #finally reclose the bracket and close the instackenv.json file
  cat >> $CONFIG/instackenv-virt.json << EOF
"host-ip": "192.168.122.1",
"power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
"ssh-key": "INSERT_STACK_USER_PRIV_KEY",
  #Overwrite the tripleo-inclubator domain.xml with our own, keeping a backup.
  if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
    /usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak

  /usr/bin/cp -f $LIB/installer/domain.xml /usr/share/tripleo/templates/domain.xml
##Create virtual nodes in virsh
##params: name - String: libvirt name for VM
##        bootdev - String: boot device for the VM
##        disksize - Number: size of the disk in GB
##        ovs_bridges: - List: list of ovs bridges
##        vcpus - Number of VCPUs to use (defaults to 4)
##        ramsize - Size of RAM for VM in MB (defaults to 8192)
function define_vm () {
  # NOTE(review): the defaulting of $5/$6 into vcpus/ramsize and several
  # closing keywords are not visible in this view of the file.
  elif [ -z "$6" ]; then

  # Create the libvirt storage volume
  if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
    virsh vol-delete ${1}.qcow2 --pool default

  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
  if [ ! -f $volume_path ]; then
    echo "$volume_path Not created successfully... Aborting"

  # define the VM via the tripleo helper script
  /usr/libexec/openstack-tripleo/configure-vm --name $1 \
    --image "$volume_path" \
    --libvirt-nic-driver virtio \
    --baremetal-interface $4
##Copy over the glance images and instackenv json file
##params: none
function configure_undercloud {
  local controller_nic_template compute_nic_template
  # NOTE(review): multiple heredoc terminators (EOF/EOI) and closing
  # keywords are not visible in this view of the file. Heredoc bodies
  # below are reproduced verbatim — do not insert text inside them.
  echo "Copying configuration files to Undercloud"
  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
    echo -e "${blue}Network Environment set for Deployment: ${reset}"
    cat /tmp/network-environment.yaml
    scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":

    # check for ODL L3/ONOS
    if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then

    # DPDK deployments use a dedicated physical bridge for the nics
    if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
      ovs_dpdk_bridge='br-phy'

    # render per-role NIC heat templates from jinja via the python helper
    if ! controller_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then
      echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"

    if ! compute_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
      echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"

    # push the rendered templates to the undercloud
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
cat > nics/controller.yaml << EOF
$controller_nic_template
cat > nics/compute.yaml << EOF
$compute_nic_template

  # ensure stack user on Undercloud machine has an ssh key
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"

  if [ "$virtual" == "TRUE" ]; then

    # copy the Undercloud VM's stack user's pub key to
    # root's auth keys so that Undercloud can control
    # vm power on the hypervisor
    ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys

    DEPLOY_OPTIONS+=" --libvirt-type qemu"
    INSTACKENV=$CONFIG/instackenv-virt.json

    # upload instackenv file to Undercloud for virtual deployment
    scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json

  # allow stack to control power management on the hypervisor via sshkey
  # only if this is a virtual deployment
  if [ "$virtual" == "TRUE" ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
while read -r line; do
stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json

  # copy stack's ssh key to this users authorized keys
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys

  # disable requiretty for sudo
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers

  # configure undercloud on Undercloud VM
  echo "Running undercloud configuration."
  echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
sed -i 's/#local_ip/local_ip/' undercloud.conf
sed -i 's/#network_gateway/network_gateway/' undercloud.conf
sed -i 's/#network_cidr/network_cidr/' undercloud.conf
sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
# we assume that packages will not need to be updated with undercloud install
# and that it will be used only to configure the undercloud
# packages updates would need to be handled manually with yum update
sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
openstack undercloud install &> apex-undercloud-install.log || {
# cat the undercloud install log incase it fails
echo "ERROR: openstack undercloud install has failed. Dumping Log:"
cat apex-undercloud-install.log
sudo systemctl restart openstack-glance-api
sudo systemctl restart openstack-nova-conductor
sudo systemctl restart openstack-nova-compute
sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
sudo systemctl restart openstack-heat-engine
sudo systemctl restart openstack-heat-api
# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
# TODO: revisit and file a bug if necessary. This should eventually be removed
# as well as glance api problem
echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
784 ##preping it for deployment and launch the deploy
786 function undercloud_prep_overcloud_deploy {
787 if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
788 if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
789 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
790 elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
791 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
792 elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
793 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
794 elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
795 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_fdio.yaml"
797 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
799 SDN_IMAGE=opendaylight
800 if [ "${deploy_options_array['sfc']}" == 'True' ]; then
802 if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
803 echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
804 echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
808 elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
809 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
810 SDN_IMAGE=opendaylight
811 elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
812 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
814 elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
815 echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
817 elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
818 echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
819 SDN_IMAGE=opendaylight
821 echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
822 echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
828 # Make sure the correct overcloud image is available
829 if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
830 echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
831 echo "Both ONOS and OpenDaylight are currently deployed from this image."
832 echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
836 echo "Copying overcloud image to Undercloud"
837 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
838 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
840 # Install ovs-dpdk inside the overcloud image if it is enabled.
841 if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
842 # install dpdk packages before ovs
843 echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
845 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
846 cat << EOF > vfio_pci.modules
848 exec /sbin/modprobe vfio_pci >/dev/null 2>&1
851 cat << EOF > uio_pci_generic.modules
853 exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
856 LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
857 --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
858 --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
859 --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
860 --run-command "yum install -y /root/dpdk_rpms/*" \
861 -a overcloud-full.qcow2
863 elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
864 echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
868 # Set ODL version accordingly
869 if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
870 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
871 LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
872 --run-command "yum -y install /root/boron/*" \
873 -a overcloud-full.qcow2
877 # Add performance deploy options if they have been set
878 if [ ! -z "${deploy_options_array['performance']}" ]; then
880 # Remove previous kernel args files per role
881 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt"
882 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt"
884 # Push performance options to subscript to modify per-role images as needed
885 for option in "${performance_options[@]}" ; do
886 echo -e "${blue}Setting performance option $option${reset}"
887 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
890 # Build IPA kernel option ramdisks
891 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
892 /bin/cp -f /home/stack/ironic-python-agent.initramfs /root/
895 gunzip -c ../ironic-python-agent.initramfs | cpio -i
896 if [ ! -f /home/stack/Compute-kernel_params.txt ]; then
897 touch /home/stack/Compute-kernel_params.txt
898 chown stack /home/stack/Compute-kernel_params.txt
900 /bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt
901 echo "Compute params set: "
902 cat tmp/kernel_params.txt
903 /bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py
904 /bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc
905 find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs
906 chown stack /home/stack/Compute-ironic-python-agent.initramfs
907 if [ ! -f /home/stack/Controller-kernel_params.txt ]; then
908 touch /home/stack/Controller-kernel_params.txt
909 chown stack /home/stack/Controller-kernel_params.txt
911 /bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt
912 echo "Controller params set: "
913 cat tmp/kernel_params.txt
914 find . | cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs
915 chown stack /home/stack/Controller-ironic-python-agent.initramfs
920 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
923 # make sure ceph is installed
924 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
926 # scale compute nodes according to inventory
927 total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
929 # check if HA is enabled
930 if [[ "$ha_enabled" == "True" ]]; then
931 DEPLOY_OPTIONS+=" --control-scale 3"
932 compute_nodes=$((total_nodes - 3))
933 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
935 compute_nodes=$((total_nodes - 1))
938 if [ "$compute_nodes" -le 0 ]; then
939 echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
942 echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
943 DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
946 if [[ "$net_isolation_enabled" == "TRUE" ]]; then
947 #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
948 DEPLOY_OPTIONS+=" -e network-environment.yaml"
951 if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
952 DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
955 if [[ ! "$virtual" == "TRUE" ]]; then
956 DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
958 DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
961 DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
963 echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
965 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
966 if [ "$debug" == 'TRUE' ]; then
967 LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
972 echo "Uploading overcloud glance images"
973 openstack overcloud image upload
975 echo "Configuring undercloud and discovering nodes"
976 openstack baremetal import --json instackenv.json
977 openstack baremetal configure boot
978 bash -x set_perf_images.sh ${performance_roles[@]}
979 #if [[ -z "$virtual" ]]; then
980 # openstack baremetal introspection bulk start
982 echo "Configuring flavors"
983 for flavor in baremetal control compute; do
984 echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
985 if openstack flavor list | grep \${flavor}; then
986 openstack flavor delete \${flavor}
988 openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
989 if ! openstack flavor list | grep \${flavor}; then
990 echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
993 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
994 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
995 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
996 echo "Configuring nameserver on ctlplane network"
998 for dns_server in ${dns_servers}; do
999 dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
1001 neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
1002 echo "Executing overcloud deployment, this should run for an extended period without output."
1003 sleep 60 #wait for Hypervisor stats to check-in to nova
1004 # save deploy command so it can be used for debugging
1005 cat > deploy_command << EOF
1006 openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
1010 if [ "$interactive" == "TRUE" ]; then
1011 if ! prompt_user "Overcloud Deployment"; then
1012 echo -e "${blue}INFO: User requests exit${reset}"
1017 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
1019 openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
1020 if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
1021 $(typeset -f debug_stack)
1028 if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
1029 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
1032 for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
1033 echo "Running DPDK test app on \$node"
1034 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
1036 sudo dpdk_helloworld --no-pci
1037 sudo dpdk_nic_bind -s
1043 if [ "$debug" == 'TRUE' ]; then
1044 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
1046 echo "Keystone Endpoint List:"
1047 openstack endpoint list
1048 echo "Keystone Service List"
1049 openstack service list
1050 cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
1055 ##Post configuration after install
##params: none
##usage: configure_post_install
# NOTE(review): several closing lines (heredoc EOI/EOF terminators, fi/done,
# and the function's closing "}") are not visible in this listing; the code
# below is preserved byte-for-byte.
1057 function configure_post_install {
1058 local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
1059 opnfv_attach_networks="admin_network public_network"
1061 echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
# Run on the undercloud as 'stack': create the external Neutron net/subnet
# from the public_network settings, then delete the swift endpoint/service.
# "\$"-escaped expansions are deliberately deferred to the remote shell.
1063 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
1066 echo "Configuring Neutron external network"
1067 neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
1068 neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
1070 echo "Removing swift endpoint and service"
1071 swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
1072 swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
1073 openstack endpoint delete \$swift_endpoint_id
1074 openstack service delete \$swift_service_id
# Back on the jumphost: make sure each attach network's OVS bridge has an IP;
# when missing, assign the last IP of that network's usable range.
1077 echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
1078 for network in ${opnfv_attach_networks}; do
1079 ovs_ip=$(find_ip ${NET_MAP[$network]})
1081 if [ -n "$ovs_ip" ]; then
1082 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
1084 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
1085 # use last IP of allocation pool
# eval is used for indirect expansion of e.g. admin_network_usable_ip_range
1086 eval "ip_range=\${${network}_usable_ip_range}"
1087 ovs_ip=${ip_range##*,}
1088 eval "net_cidr=\${${network}_cidr}"
# ${net_cidr##*/} extracts the prefix length from the CIDR (e.g. 24)
1089 sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
1090 sudo ip link set up ${NET_MAP[$network]}
# Re-check that the address actually took effect
1091 tmp_ip=$(find_ip ${NET_MAP[$network]})
1092 if [ -n "$tmp_ip" ]; then
1093 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
1096 echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
1102 # for virtual, we NAT public network through Undercloud
1103 if [ "$virtual" == "TRUE" ]; then
1104 if ! configure_undercloud_nat ${public_network_cidr}; then
1105 echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
1108 echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
1112 # for sfc deployments we need the vxlan workaround
1113 if [ "${deploy_options_array['sfc']}" == 'True' ]; then
# Bring up br-int and add a static route on every overcloud node
1114 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
1117 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
1118 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
1119 sudo ifconfig br-int up
1120 sudo ip route add 123.123.123.0/24 dev br-int
1126 # Collect deployment logs
# Copies /var/log/messages from each overcloud node into ~/deploy_logs on the
# undercloud; when debug is TRUE the whole log is also dumped to stdout.
1127 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
1128 mkdir -p ~/deploy_logs
1129 rm -rf deploy_logs/*
1132 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
1133 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
1134 sudo cp /var/log/messages /home/heat-admin/messages.log
1135 sudo chown heat-admin /home/heat-admin/messages.log
1137 scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
1138 if [ "$debug" == "TRUE" ]; then
1139 nova list --ip \$node
1140 echo "---------------------------"
1141 echo "-----/var/log/messages-----"
1142 echo "---------------------------"
1143 cat ~/deploy_logs/\$node.messages.log
1144 echo "---------------------------"
1145 echo "----------END LOG----------"
1146 echo "---------------------------"
# Remove the copied log from the node after retrieval
1148 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
1149 sudo rm -f /home/heat-admin/messages.log
1153 # Print out the undercloud IP and dashboard URL
# NOTE(review): the "\$(heat ...)" escape defers expansion — presumably these
# echoes originally ran inside a remote heredoc; confirm against the full file.
1155 echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
1156 echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
# Print command-line help for this deploy script. (The enclosing usage
# function's declaration and closing brace are not visible in this listing.)
1162 echo -e "Usage:\n$0 [arguments] \n"
1163 echo -e " -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
1164 echo -e " -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
1165 echo -e " -n|--net-settings : Full path to network settings file. Optional."
1166 echo -e " -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
1167 echo -e " -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
1168 echo -e " --flat : disable Network Isolation and use a single flat network for the underlay network."
1169 echo -e " --no-post-config : disable Post Install configuration."
1170 echo -e " --debug : enable debug output."
1171 echo -e " --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
1172 echo -e " --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
1173 echo -e " --virtual-ram : Amount of RAM to use per Overcloud VM in GB (defaults to 8)."
1176 ##translates the command line parameters into variables
1177 ##params: $@ the entire command line is passed
1178 ##usage: parse_cmd_line "$@"
# Command-line parsing body. (The enclosing parse function's declaration and
# the case/esac scaffolding between option arms are elided in this listing;
# the code below is preserved byte-for-byte.)
1180 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
1181 echo "Use -h to display help"
# Consume arguments for as long as the next word starts with '-'
1184 while [ "${1:0:1}" = "-" ]
1191 -d|--deploy-settings)
1192 DEPLOY_SETTINGS_FILE=$2
1193 echo "Deployment Configuration file: $2"
1202 echo "Network Settings Configuration file: $2"
1207 echo "Using $2 as the ping site"
1212 echo "Executing a Virtual Deployment"
# --flat turns off network isolation entirely
1216 net_isolation_enabled="FALSE"
1217 echo "Underlay Network Isolation Disabled: using flat configuration"
1222 echo "Post install configuration disabled"
1227 echo "Enable debug output"
1232 echo "Interactive mode enabled"
1237 echo "Number of CPUs per VM set to $VM_CPUS"
1242 echo "Amount of RAM per VM set to $VM_RAM"
1245 --virtual-computes )
1247 echo "Virtual Compute nodes set to $VM_COMPUTES"
# Cross-option sanity checks after flag parsing:
# --flat with a net-settings file only uses admin_network; no net-settings is fatal
1257 if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
1258 echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
1259 elif [[ -z "$NETSETS" ]]; then
1260 echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
# An inventory is meaningless for virtual deployments
1264 if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
1265 echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
# Deploy settings file is mandatory and must exist
1269 if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
1270 echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
# Network settings file, when given, must exist
1274 if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
1275 echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
# Fail when an inventory file was specified but does not exist.
# Fix: the error message used "{$red}" (a literal '{' plus expansion of $red
# in the wrong place), which printed stray braces and left the message
# uncolored; corrected to the "${red}" form used by every sibling check.
# Also replaced the deprecated-style '! -z' with the idiomatic '-n'.
1279 if [[ -n "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
1280 echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
# Baremetal (non-virtual) deployments must supply an inventory file
1284 if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
1285 echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
# --flat and post-install configuration are mutually exclusive; warn and skip
1289 if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
1290 echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
1300 echo -e "${blue}INFO: Parsing network settings file...${reset}"
1301 parse_network_settings
1302 if ! configure_deps; then
1303 echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
1306 if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
1307 echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
1308 parse_deploy_settings
1311 if [ "$virtual" == "TRUE" ]; then
1312 setup_virtual_baremetal $VM_CPUS $VM_RAM
1313 elif [ -n "$INVENTORY_FILE" ]; then
1314 parse_inventory_file
1316 configure_undercloud
1317 undercloud_prep_overcloud_deploy
1318 if [ "$post_config" == "TRUE" ]; then
1319 if ! configure_post_install; then
1320 echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
1323 echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
1326 if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
1327 if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
1328 echo -e "${red}ERROR:ONOS Post Install Configuration Failed, Exiting.${reset}"
1331 echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"