#!/usr/bin/env bash
##############################################################################
# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Deploy script to install provisioning server for OPNFV Apex
# author: Dan Radez (dradez@redhat.com)
# author: Tim Rozet (trozet@redhat.com)
#
# Based on RDO Manager http://www.rdoproject.org
reset=$(tput sgr0 || echo "")
blue=$(tput setaf 4 || echo "")
red=$(tput setaf 1 || echo "")
green=$(tput setaf 2 || echo "")
ntp_server="pool.ntp.org"
net_isolation_enabled="TRUE"
ping_site="8.8.8.8"   # default per usage text below
post_config="TRUE"
VM_CPUS=4
VM_RAM=8

declare -A deploy_options_array
declare -a performance_options

SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)

CONFIG=${CONFIG:-'/var/opt/opnfv'}
RESOURCES=${RESOURCES:-"$CONFIG/images"}
LIB=${LIB:-"$CONFIG/lib"}
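# Note: CONFIG, RESOURCES and LIB honor values already present in the
# environment, so paths can be overridden per invocation, e.g.
#   CONFIG=/opt/apex RESOURCES=/opt/apex/images ./deploy.sh -v
# (deploy.sh is an assumed filename for this script)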
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"

# Netmap used to map networks to OVS bridge names
declare -A NET_MAP
NET_MAP['admin_network']="br-admin"
NET_MAP['private_network']="br-private"
NET_MAP['public_network']="br-public"
NET_MAP['storage_network']="br-storage"
ext_net_type="interface"
source $LIB/common-functions.sh
source $LIB/utility-functions.sh
source $LIB/installer/onos/onos_gw_mac_update.sh
##translates yaml into variables
##params: filename, prefix (ex. "config_")
##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
parse_yaml() {
  local prefix=$2
  local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
  sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
      -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
  awk -F$fs '{
    indent = length($1)/2;
    vname[indent] = $2;
    for (i in vname) {if (i > indent) {delete vname[i]}}
    if (length($3) > 0) {
      vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
      printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
    }
  }'
}
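# Example (illustrative): given a settings file containing
#   nodes:
#     node1:
#       mac_address: "00:1e:67:4f:cc:f1"
# parse_yaml emits shell assignments such as
#   config_nodes_node1_mac_address="00:1e:67:4f:cc:f1"
# which callers bring into scope with eval, as parse_inventory_file does below.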
##checks if prefix exists in string
##params: string, prefix
##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
contains_prefix() {
  local mystr=$1 prefix=$2
  if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
    return 0
  else
    return 1
  fi
}
##parses variable from a string with '='
##and removes global prefix
##params: string, prefix
##usage: parse_setting_var 'deploy_myvar=2' 'deploy_'
parse_setting_var() {
  local mystr=$1 prefix=$2
  if echo $mystr | grep -E "^.+\=" > /dev/null; then
    echo $(echo $mystr | grep -Eo "^.+\=" | tr -d '=' | sed 's/^'"$prefix"'//')
  else
    return 1
  fi
}
##parses value from a string with '='
##params: string
##usage: parse_setting_value 'deploy_myvar=2'
parse_setting_value() {
  local mystr=$1
  echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
}
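# Example (illustrative) of the two helpers together:
#   parse_setting_var 'deploy_myvar=2' 'deploy_'   # prints: myvar
#   parse_setting_value 'deploy_myvar=2'           # prints: 2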
##parses network settings yaml into globals
parse_network_settings() {
  local output
  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-net-settings -s $NETSETS -i $net_isolation_enabled -e $CONFIG/network-environment.yaml); then
      echo -e "${blue}${output}${reset}"
      eval "$output"
  else
      echo -e "${red}ERROR: Failed to parse network settings file $NETSETS ${reset}"
      exit 1
  fi
}
##parses deploy settings yaml into globals
parse_deploy_settings() {
  local output
  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
      echo -e "${blue}${output}${reset}"
      eval "$output"
  else
      echo -e "${red}ERROR: Failed to parse deploy settings file $DEPLOY_SETTINGS_FILE ${reset}"
      exit 1
  fi
}
##parses baremetal yaml settings into compatible json
##writes the json to $CONFIG/instackenv_tmp.json
##params: none
##usage: parse_inventory_file
parse_inventory_file() {
  local inventory=$(parse_yaml $INVENTORY_FILE)
  local node_list
  local node_prefix="node"
  local node_count=0
  local node_total
  local inventory_list

  # detect number of nodes
  for entry in $inventory; do
    if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
      this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
      if [[ "$inventory_list" != *"$this_node"* ]]; then
        inventory_list+="$this_node "
      fi
    fi
  done

  inventory_list=$(echo $inventory_list | sed 's/ $//')

  for node in $inventory_list; do
    ((node_count+=1))
  done
  node_total=$node_count

  if [[ "$node_total" -lt 5 && "$ha_enabled" == "True" ]]; then
    echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
    exit 1
  elif [[ "$node_total" -lt 2 ]]; then
    echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"
    exit 1
  fi

  eval $(parse_yaml $INVENTORY_FILE) || {
    echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"
    exit 1
  }

  instackenv_output="
{
 \"nodes\" : [
"
  node_count=0
  for node in $inventory_list; do
    ((node_count+=1))
    node_output="
        {
          \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
          \"pm_type\": \"$(eval echo \${${node}pm_type})\",
          \"mac\": [
            \"$(eval echo \${${node}mac_address})\"
          ],
          \"cpu\": \"$(eval echo \${${node}cpus})\",
          \"memory\": \"$(eval echo \${${node}memory})\",
          \"disk\": \"$(eval echo \${${node}disk})\",
          \"arch\": \"$(eval echo \${${node}arch})\",
          \"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
          \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
          \"capabilities\": \"$(eval echo \${${node}capabilities})\"
"
    instackenv_output+=${node_output}
    if [ $node_count -lt $node_total ]; then
      instackenv_output+="        },"
    else
      instackenv_output+="        }"
    fi
  done

  instackenv_output+='
  ]
}
'

  #Copy instackenv.json to undercloud for baremetal
  echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat > instackenv.json << EOF
$instackenv_output
EOF
EOI
}
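# Illustrative inventory layout consumed above; the field names mirror the
# \${node}... lookups, and the values are placeholders rather than defaults:
#   nodes:
#     node1:
#       mac_address: "00:1e:67:4f:cc:f1"
#       ipmi_ip: 192.0.2.10
#       ipmi_user: admin
#       ipmi_pass: password
#       pm_type: "pxe_ipmitool"
#       cpus: 2
#       memory: 8192
#       disk: 40
#       arch: "x86_64"
#       capabilities: "profile:control"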
##verify internet connectivity
#params: none
function verify_internet {
  if ping -c 2 $ping_site > /dev/null; then
    if ping -c 2 www.google.com > /dev/null; then
      echo "${blue}Internet connectivity detected${reset}"
      return 0
    else
      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
      return 1
    fi
  else
    echo "${red}No internet connectivity detected${reset}"
    return 1
  fi
}
##download dependencies if missing and configure host
#params: none
function configure_deps {
  if ! verify_internet; then
    echo "${red}Will not download dependencies${reset}"
    internet=false
  fi

  # verify ip forwarding
  if sysctl net.ipv4.ip_forward | grep 0; then
    sudo sysctl -w net.ipv4.ip_forward=1
    sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
  fi

  # ensure no dhcp server is running on jumphost
  if ! sudo systemctl status dhcpd | grep dead; then
    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
    sudo systemctl stop dhcpd
    sudo systemctl disable dhcpd
  fi

  # ensure networks are configured
  systemctl status libvirtd || systemctl start libvirtd
  systemctl status openvswitch || systemctl start openvswitch

  # If flat we only use admin network
  if [[ "$net_isolation_enabled" == "FALSE" ]]; then
    virsh_enabled_networks="admin_network"
    enabled_network_list="admin_network"
  # For baremetal we only need to create/attach Undercloud to admin and public
  elif [ "$virtual" == "FALSE" ]; then
    virsh_enabled_networks="admin_network public_network"
  else
    virsh_enabled_networks=$enabled_network_list
  fi

  # ensure default network is configured correctly
  libvirt_dir="/usr/share/libvirt/networks"
  virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
  virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
  virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default

  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
    for network in ${OPNFV_NETWORK_TYPES}; do
      echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
      ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
      virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network>
  <name>$network</name>
  <forward mode='bridge'/>
  <bridge name='${NET_MAP[$network]}'/>
  <virtualport type='openvswitch'/>
</network>
EOF
      if ! (virsh net-list --all | grep $network > /dev/null); then
        echo "${red}ERROR: unable to create network: ${network}${reset}"
        exit 1
      fi
      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
    done

    echo -e "${blue}INFO: Bridges set: ${reset}"
    ovs-vsctl list-br

    # bridge interfaces to correct OVS instances for baremetal deployment
    for network in ${enabled_network_list}; do
      if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
        continue
      fi
      this_interface=$(eval echo \${${network}_bridged_interface})
      # check if this a bridged interface for this network
      if [[ ! -z "$this_interface" && "$this_interface" != "none" ]]; then
        if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
          echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
          exit 1
        else
          echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
        fi
      else
        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
        exit 1
      fi
    done
  else
    for network in ${OPNFV_NETWORK_TYPES}; do
      echo "${blue}INFO: Creating Virsh Network: $network${reset}"
      virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network>
  <name>$network</name>
  <bridge name='${NET_MAP[$network]}'/>
</network>
EOF
      if ! (virsh net-list --all | grep $network > /dev/null); then
        echo "${red}ERROR: unable to create network: ${network}${reset}"
        exit 1
      fi
      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
    done

    echo -e "${blue}INFO: Bridges set: ${reset}"
    brctl show
  fi

  echo -e "${blue}INFO: virsh networks set: ${reset}"
  virsh net-list

  # ensure storage pool exists and is started
  virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
  virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)

  if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
    echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n  \
Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
  fi

  if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
  if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi

  if ! lsmod | grep kvm > /dev/null; then
    echo "${red}kvm kernel modules not loaded!${reset}"
    return 1
  fi

  ##sshkeygen for root
  if [ ! -e ~/.ssh/id_rsa.pub ]; then
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
  fi

  echo "${blue}All dependencies installed and running${reset}"
}
##verify vm exists, and has a dhcp lease assigned to it
##params: none
function setup_undercloud_vm {
  if ! virsh list --all | grep undercloud > /dev/null; then
      undercloud_nets="default admin_network"
      if [[ $enabled_network_list =~ "public_network" ]]; then
        undercloud_nets+=" public_network"
      fi
      define_vm undercloud hd 30 "$undercloud_nets" 4 12288

      ### this doesn't work for some reason I was getting hangup events so using cp instead
      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
      #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
      #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
      #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
      #error: cannot close volume undercloud.qcow2
      #error: internal error: received hangup / error event on socket
      #error: Reconnected to the hypervisor

      local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
      cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst

      # resize Undercloud machine
      echo "Checking if Undercloud needs to be resized..."
      undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst | grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
      if [ "$undercloud_size" -lt 30 ]; then
        qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
        LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
        new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst | grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
        if [ "$new_size" -lt 30 ]; then
          echo "Error resizing Undercloud machine, disk size is ${new_size}"
          exit 1
        else
          echo "Undercloud successfully resized"
        fi
      else
        echo "Skipped Undercloud resize, upstream is large enough"
      fi
  else
      echo "Found Undercloud VM, using existing VM"
  fi

  # if the VM is not running update the authkeys and start it
  if ! virsh list | grep undercloud > /dev/null; then
    echo "Injecting ssh key to Undercloud VM"
    LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
        --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
        --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
    virsh start undercloud
  fi

  sleep 10 # let undercloud get started up

  # get the undercloud VM IP
  CNT=10
  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
  while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
      echo -n "."
      sleep 10
      CNT=$((CNT-1))
  done
  UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})

  if [ -z "$UNDERCLOUD" ]; then
    echo "\n\nCan't get IP for Undercloud. Can Not Continue."
    exit 1
  else
    echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
  fi

  CNT=10
  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
      echo -n "."
      sleep 3
      CNT=$((CNT-1))
  done
  if [ "$CNT" -eq 0 ]; then
      echo "Failed to contact Undercloud. Can Not Continue"
      exit 1
  fi

  CNT=10
  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
      echo -n "."
      sleep 3
      CNT=$((CNT-1))
  done
  if [ "$CNT" -eq 0 ]; then
      echo "Failed to connect to Undercloud. Can Not Continue"
      exit 1
  fi

  # extra space to overwrite the previous connectivity output
  echo -e "${blue}\r                                                                 ${reset}"
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"

  # ssh key fix for stack user
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
}
##Create virtual nodes in virsh
##params: vcpus, ramsize
function setup_virtual_baremetal {
  local vcpus ramsize
  if [ -z "$1" ]; then
    vcpus=4
    ramsize=8192
  elif [ -z "$2" ]; then
    vcpus=$1
    ramsize=8192
  else
    vcpus=$1
    ramsize=$(($2*1024))
  fi

  #start by generating the opening json for instackenv.json
  cat > $CONFIG/instackenv-virt.json << EOF
{
  "nodes": [
EOF

  # next create the virtual machines and add their definitions to the file
  if [ "$ha_enabled" == "False" ]; then
      # 1 controller + computes
      # zero based so just pass compute count
      vm_index=$VM_COMPUTES
  else
      # 3 controller + computes
      # zero based so add 2 to compute count
      vm_index=$((2+$VM_COMPUTES))
  fi

  for i in $(seq 0 $vm_index); do
    if ! virsh list --all | grep baremetal${i} > /dev/null; then
      define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
      for n in private_network public_network storage_network; do
        if [[ $enabled_network_list =~ $n ]]; then
          virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config
        fi
      done
    else
      echo "Found Baremetal ${i} VM, using existing VM"
    fi
    #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
    mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')

    cat >> $CONFIG/instackenv-virt.json << EOF
    {
      "pm_addr": "192.168.122.1",
      "pm_user": "root",
      "pm_password": "INSERT_STACK_USER_PRIV_KEY",
      "pm_type": "pxe_ssh",
      "mac": [
        "$mac"
      ],
      "cpu": "$vcpus",
      "memory": "$ramsize",
      "disk": "41",
      "arch": "x86_64"
    },
EOF
  done

  #truncate the last line to remove the comma behind the bracket
  tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
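  # Note: the pipeline above reads the byte count of the file's final line
  # (the trailing "}," written by the loop) and chops exactly that many
  # bytes off the end, leaving valid JSON once the array is re-closed below.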
  #finally reclose the bracket and close the instackenv.json file
  cat >> $CONFIG/instackenv-virt.json << EOF
    }
  ],
  "arch": "x86_64",
  "host-ip": "192.168.122.1",
  "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
  "seed-ip": "",
  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
  "ssh-user": "root"
}
EOF
}
##Create virtual nodes in virsh
##params: name - String: libvirt name for VM
##        bootdev - String: boot device for the VM
##        disksize - Number: size of the disk in GB
##        ovs_bridges: - List: list of ovs bridges
##        vcpus - Number of VCPUs to use (defaults to 4)
##        ramsize - Size of RAM for VM in MB (defaults to 8192)
function define_vm () {
  local vcpus ramsize
  if [ -z "$5" ]; then
    vcpus=4
    ramsize=8388608
  elif [ -z "$6" ]; then
    vcpus=$5
    ramsize=8388608
  else
    vcpus=$5
    ramsize=$(($6*1024))   # MB to KiB for libvirt
  fi

  # Create the libvirt storage volume
  if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
    virsh vol-delete ${1}.qcow2 --pool default
  fi

  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
  if [ ! -f $volume_path ]; then
     echo "$volume_path Not created successfully... Aborting"
     exit 1
  fi

  # create the VM
  /usr/libexec/openstack-tripleo/configure-vm --name $1 \
                                              --bootdev $2 \
                                              --image "$volume_path" \
                                              --diskbus sata \
                                              --arch x86_64 \
                                              --cpus $vcpus \
                                              --memory $ramsize \
                                              --libvirt-nic-driver virtio \
                                              --baremetal-interface $4
}
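# Usage examples taken from elsewhere in this script:
#   define_vm undercloud hd 30 "$undercloud_nets" 4 12288
#   define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize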
##Copy over the glance images and instackenv json file
##params: none
function configure_undercloud {
  local controller_nic_template compute_nic_template

  echo "Copying configuration files to Undercloud"
  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
    echo -e "${blue}Network Environment set for Deployment: ${reset}"
    cat /tmp/network-environment.yaml
    scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":

    # check for ODL L3/ONOS
    if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
      ext_net_type=br-ex
    fi

    if ! controller_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then
      echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
      exit 1
    fi

    if ! compute_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then
      echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
      exit 1
    fi

    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
mkdir -p nics  # assumed: ensure the nics dir exists before writing templates
cat > nics/controller.yaml << EOF
$controller_nic_template
EOF
cat > nics/compute.yaml << EOF
$compute_nic_template
EOF
EOI
  fi

  # ensure stack user on Undercloud machine has an ssh key
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"

  if [ "$virtual" == "TRUE" ]; then
      # copy the Undercloud VM's stack user's pub key to
      # root's auth keys so that Undercloud can control
      # vm power on the hypervisor
      ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys

      DEPLOY_OPTIONS+=" --libvirt-type qemu"
      INSTACKENV=$CONFIG/instackenv-virt.json

      # upload instackenv file to Undercloud for virtual deployment
      scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
  fi
  # allow stack to control power management on the hypervisor via sshkey
  # only if this is a virtual deployment
  if [ "$virtual" == "TRUE" ]; then
      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
while read -r line; do
  stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
EOI
  fi
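  # Note: the remote loop above folds the multi-line private key into one
  # string joined by literal \n sequences (then strips the leading one), so
  # sed can splice the key into the single-line INSERT_STACK_USER_PRIV_KEY
  # placeholder in instackenv.json.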
  # copy stack's ssh key to this users authorized keys
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys

  # disable requiretty for sudo
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//' /etc/sudoers"

  # configure undercloud on Undercloud VM
  echo "Running undercloud configuration."
  echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
  sed -i 's/#local_ip/local_ip/' undercloud.conf
  sed -i 's/#network_gateway/network_gateway/' undercloud.conf
  sed -i 's/#network_cidr/network_cidr/' undercloud.conf
  sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
  sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
  sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
  sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf

  openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
  openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
  openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
  openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
  openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
  openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
  openstack-config --set undercloud.conf DEFAULT undercloud_debug false
fi
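# Illustrative undercloud.conf result of the block above (addresses are
# placeholders standing in for the parsed admin network settings):
#   [DEFAULT]
#   local_ip = 192.0.2.1/24
#   network_gateway = 192.0.2.1
#   network_cidr = 192.0.2.0/24
#   dhcp_start = 192.0.2.5
#   dhcp_end = 192.0.2.24
#   inspection_iprange = 192.0.2.100,192.0.2.120
#   undercloud_debug = false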
sudo sed -i '/CephClusterFSID:/c\\  CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\  CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\  CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml

# we assume that packages will not need to be updated with undercloud install
# and that it will be used only to configure the undercloud
# packages updates would need to be handled manually with yum update
sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
#!/bin/sh
# no-op stub: skip package installs during undercloud configuration
exit 0
EOF

openstack undercloud install &> apex-undercloud-install.log || {
    # cat the undercloud install log in case it fails
    echo "ERROR: openstack undercloud install has failed. Dumping Log:"
    cat apex-undercloud-install.log
    exit 1
}

sudo systemctl restart openstack-glance-api
sudo systemctl restart openstack-nova-conductor
sudo systemctl restart openstack-nova-compute

sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
sudo systemctl restart openstack-heat-engine
sudo systemctl restart openstack-heat-api

# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
# TODO: revisit and file a bug if necessary. This should eventually be removed
# as well as glance api problem
echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
sleep 15
EOI
}
##preps the undercloud for deployment and launches the overcloud deploy
##params: none
function undercloud_prep_overcloud_deploy {
  if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
    if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
    elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
    elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
    else
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
    fi
    SDN_IMAGE=opendaylight
    if [ "${deploy_options_array['sfc']}" == 'True' ]; then
      SDN_IMAGE+=-sfc
      if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
          echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
          echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
          exit 1
      fi
    fi
  elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
    SDN_IMAGE=opendaylight
  elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
    SDN_IMAGE=onos
  elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
    echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
    exit 1
  elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
    echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
    SDN_IMAGE=opendaylight
  else
    echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
    echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
    exit 1
  fi

  # Make sure the correct overcloud image is available
  if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
      echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
      echo "Both ONOS and OpenDaylight are currently deployed from this image."
      echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
      exit 1
  fi

  echo "Copying overcloud image to Undercloud"
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2

  # Push performance options to subscript to modify per-role images as needed
  for option in "${performance_options[@]}" ; do
    echo -e "${blue}Setting performance option $option${reset}"
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
  done

  # Add performance deploy options if they have been set
  if [ ! -z "${deploy_options_array['performance']}" ]; then
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
  fi

  # make sure ceph is installed
  DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"

  # scale compute nodes according to inventory
  total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")

  # check if HA is enabled
  if [[ "$ha_enabled" == "True" ]]; then
    DEPLOY_OPTIONS+=" --control-scale 3"
    compute_nodes=$((total_nodes - 3))
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
  else
    compute_nodes=$((total_nodes - 1))
  fi

  if [ "$compute_nodes" -le 0 ]; then
    echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
    exit 1
  else
    echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
    DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
  fi

  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
     #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
     DEPLOY_OPTIONS+=" -e network-environment.yaml"
  fi

  if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
     DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
  fi

  if [[ ! "$virtual" == "TRUE" ]]; then
     DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
  else
     DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
  fi

  DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"

  echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
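  # Illustrative example of an assembled DEPLOY_OPTIONS string for an HA,
  # network-isolated ODL run (fragments mirror the branches above):
  #   -e .../environments/opendaylight.yaml -e .../environments/storage-environment.yaml
  #   --control-scale 3 -e .../environments/puppet-pacemaker.yaml --compute-scale 2
  #   -e network-environment.yaml --ntp-server pool.ntp.org -e opnfv-environment.yaml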
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
if [ "$debug" == 'TRUE' ]; then
    LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
fi

source stackrc
set -o errexit
echo "Uploading overcloud glance images"
openstack overcloud image upload

bash -x set_perf_images.sh ${performance_roles[@]}

echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
openstack baremetal configure boot
#if [[ -z "$virtual" ]]; then
#  openstack baremetal introspection bulk start
#fi
echo "Configuring flavors"
for flavor in baremetal control compute; do
  echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
  if openstack flavor list | grep \${flavor}; then
    openstack flavor delete \${flavor}
  fi
  openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
  if ! openstack flavor list | grep \${flavor}; then
    echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
  fi
done
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
echo "Configuring nameserver on ctlplane network"
neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
echo "Executing overcloud deployment, this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
# save deploy command so it can be used for debugging
cat > deploy_command << EOF
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
EOF
EOI

  if [ "$interactive" == "TRUE" ]; then
    if ! prompt_user "Overcloud Deployment"; then
      echo -e "${blue}INFO: User requests exit${reset}"
      exit 0
    fi
  fi

  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
source stackrc
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
  $(typeset -f debug_stack)
  debug_stack
  exit 1
fi
EOI

  if [ "$debug" == 'TRUE' ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
source overcloudrc
echo "Keystone Endpoint List:"
keystone endpoint-list
echo "Keystone Service List"
keystone service-list
cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
EOI
  fi
}
##Post configuration after install
##params: none
function configure_post_install {
  local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
  opnfv_attach_networks="admin_network public_network"

  echo -e "${blue}INFO: Post Install Configuration Running...${reset}"

  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
source overcloudrc
set -o errexit
echo "Configuring Neutron external network"
neutron net-create external --router:external=True --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }')
neutron subnet-create --name external-net --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
EOI

  echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
  for network in ${opnfv_attach_networks}; do
    ovs_ip=$(find_ip ${NET_MAP[$network]})
    if [ -n "$ovs_ip" ]; then
      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
    else
      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
      # use last IP of allocation pool
      eval "ip_range=\${${network}_usable_ip_range}"
      ovs_ip=${ip_range##*,}
      eval "net_cidr=\${${network}_cidr}"
      sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
      sudo ip link set up ${NET_MAP[$network]}
      tmp_ip=$(find_ip ${NET_MAP[$network]})
      if [ -n "$tmp_ip" ]; then
        echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
        continue
      else
        echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
        return 1
      fi
    fi
  done

  # for virtual, we NAT public network through Undercloud
  if [ "$virtual" == "TRUE" ]; then
    if ! configure_undercloud_nat ${public_network_cidr}; then
      echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
      exit 1
    else
      echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
    fi
  fi

  # for sfc deployments we need the vxlan workaround
  if [ "${deploy_options_array['sfc']}" == 'True' ]; then
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
source stackrc
set -o errexit
for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
sudo ifconfig br-int up
sudo ip route add 123.123.123.0/24 dev br-int
EOF
done
EOI
  fi

  # Collect deployment logs
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
mkdir -p ~/deploy_logs
rm -rf deploy_logs/*
source stackrc
set -o errexit
for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
 sudo cp /var/log/messages /home/heat-admin/messages.log
 sudo chown heat-admin /home/heat-admin/messages.log
EOF
scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
if [ "$debug" == "TRUE" ]; then
    nova list --ip \$node
    echo "---------------------------"
    echo "-----/var/log/messages-----"
    echo "---------------------------"
    cat ~/deploy_logs/\$node.messages.log
    echo "---------------------------"
    echo "----------END LOG----------"
    echo "---------------------------"
fi
ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
 sudo rm -f /home/heat-admin/messages.log
EOF
done

# Print out the dashboard URL
source stackrc
echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
EOI
}
display_usage() {
  echo -e "Usage:\n$0 [arguments] \n"
  echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
  echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
  echo -e "   -n|--net-settings : Full path to network settings file. Optional."
  echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
  echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
  echo -e "   --flat : disable Network Isolation and use a single flat network for the underlay network."
  echo -e "   --no-post-config : disable Post Install configuration."
  echo -e "   --debug : enable debug output."
  echo -e "   --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
  echo -e "   --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
  echo -e "   --virtual-ram : Amount of RAM to use per Overcloud VM in GB (defaults to 8)."
  echo -e "   --virtual-computes : Number of Virtual Compute nodes to create in a virtual deployment."
}
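# Illustrative invocations (deploy.sh is an assumed filename for this script):
#   deploy.sh -v -n network_settings.yaml -d deploy_settings.yaml
#   deploy.sh -i inventory.yaml -n network_settings.yaml -d deploy_settings.yaml --debug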
##translates the command line parameters into variables
##params: $@ the entire command line is passed
##usage: parse_cmd_line "$@"
parse_cmd_line() {
  echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
  echo "Use -h to display help"

  while [ "${1:0:1}" = "-" ]
  do
    case "$1" in
      -h|--help) display_usage; exit 0;;
      -d|--deploy-settings) DEPLOY_SETTINGS_FILE=$2; echo "Deployment Configuration file: $2"; shift 2;;
      -i|--inventory) INVENTORY_FILE=$2; shift 2;;
      -n|--net-settings) NETSETS=$2; echo "Network Settings Configuration file: $2"; shift 2;;
      -p|--ping-site) ping_site=$2; echo "Using $2 as the ping site"; shift 2;;
      -v|--virtual) virtual="TRUE"; echo "Executing a Virtual Deployment"; shift 1;;
      --flat) net_isolation_enabled="FALSE"; echo "Underlay Network Isolation Disabled: using flat configuration"; shift 1;;
      --no-post-config) post_config="FALSE"; echo "Post install configuration disabled"; shift 1;;
      --debug) debug="TRUE"; echo "Enable debug output"; shift 1;;
      --interactive) interactive="TRUE"; echo "Interactive mode enabled"; shift 1;;
      --virtual-cpus) VM_CPUS=$2; echo "Number of CPUs per VM set to $VM_CPUS"; shift 2;;
      --virtual-ram) VM_RAM=$2; echo "Amount of RAM per VM set to $VM_RAM"; shift 2;;
      --virtual-computes) VM_COMPUTES=$2; echo "Virtual Compute nodes set to $VM_COMPUTES"; shift 2;;
      *) display_usage; exit 1;;
    esac
  done
  if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
    echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
  elif [[ -z "$NETSETS" ]]; then
    echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
    exit 1
  fi

  if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
    exit 1
  fi

  if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
    echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
    exit 1
  fi

  if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
    echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
    exit 1
  fi

  if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
    exit 1
  fi

  if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
    echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
    exit 1
  fi

  if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
    echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
    post_config="FALSE"
  fi
}
main() {
  parse_cmd_line "$@"
  # Make sure jinja2 is installed
  easy_install-3.4 jinja2 > /dev/null
  echo -e "${blue}INFO: Parsing network settings file...${reset}"
  parse_network_settings
  if ! configure_deps; then
    echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
    exit 1
  fi
  if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
    echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
    parse_deploy_settings
  fi
  setup_undercloud_vm
  if [ "$virtual" == "TRUE" ]; then
    setup_virtual_baremetal $VM_CPUS $VM_RAM
  elif [ -n "$INVENTORY_FILE" ]; then
    parse_inventory_file
  fi
  configure_undercloud
  undercloud_prep_overcloud_deploy
  if [ "$post_config" == "TRUE" ]; then
    if ! configure_post_install; then
      echo -e "${red}ERROR: Post Install Configuration Failed, Exiting.${reset}"
      exit 1
    else
      echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
    fi
  fi
  if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
    if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
      echo -e "${red}ERROR: ONOS Post Install Configuration Failed, Exiting.${reset}"
      exit 1
    else
      echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"
    fi
  fi
}

main "$@"