2 ##############################################################################
3 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
11 # Deploy script to install provisioning server for OPNFV Apex
12 # author: Dan Radez (dradez@redhat.com)
13 # author: Tim Rozet (trozet@redhat.com)
15 # Based on RDO Manager http://www.rdoproject.org
# Terminal color escapes; fall back to empty strings when tput/TERM is unavailable
reset=$(tput sgr0 || echo "")
blue=$(tput setaf 4 || echo "")
red=$(tput setaf 1 || echo "")
green=$(tput setaf 2 || echo "")
# Default NTP server passed to the overcloud deploy
ntp_server="pool.ntp.org"
# Network isolation is on by default; the --flat option switches this to FALSE
net_isolation_enabled="TRUE"
# key/value deploy settings parsed from the deploy settings file
declare -A deploy_options_array
# per-role performance tuning options (list of strings)
declare -a performance_options
# Shared ssh options: undercloud/overcloud hosts are rebuilt between runs, so
# host-key checking is disabled and known-hosts files are discarded
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
# Base paths for configuration, images and helper libraries (env-overridable)
CONFIG=${CONFIG:-'/var/opt/opnfv'}
RESOURCES=${RESOURCES:-"$CONFIG/images"}
LIB=${LIB:-"$CONFIG/lib"}
# All network roles recognised by the installer
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network api_network"
# Netmap used to map networks to OVS bridge names
# NOTE: NET_MAP must be declared associative; without 'declare -A' the string
# subscripts below are arithmetically evaluated to 0 and every assignment
# would overwrite the same element of an indexed array.
declare -A NET_MAP
NET_MAP['admin_network']="br-admin"
NET_MAP['private_network']="br-private"
NET_MAP['public_network']="br-public"
NET_MAP['storage_network']="br-storage"
NET_MAP['api_network']="br-api"
# Default external network attachment type used in compute NIC templates
ext_net_type="interface"
# Helper libraries required by this installer; each must source cleanly or
# the deploy cannot proceed.
lib_files=(
  $LIB/common-functions.sh
  $LIB/configure-deps-functions.sh
  $LIB/parse-functions.sh
  $LIB/virtual-setup-functions.sh
  $LIB/utility-functions.sh
  $LIB/installer/onos/onos_gw_mac_update.sh
)
for lib_file in "${lib_files[@]}"; do
  if ! source "$lib_file"; then
    echo -e "${red}ERROR: Failed to source $lib_file${reset}"
    exit 1
  fi
done
##checks if prefix exists in string
##params: string, prefix
##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
# NOTE(review): the `function contains_prefix {` line and the local
# assignments of $mystr/$prefix are elided in this chunk; the anchored grep
# below tests whether $mystr begins with $prefix.
if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
##verify internet connectivity
##uses global: $ping_site (address pinged to test raw IP connectivity)
function verify_internet {
# first check raw IP connectivity, then DNS resolution via a well-known name
if ping -c 2 $ping_site > /dev/null; then
if ping -c 2 www.google.com > /dev/null; then
echo "${blue}Internet connectivity detected${reset}"
# NOTE(review): else/fi/return scaffolding of this function is elided here
echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
echo "${red}No internet connectivity detected${reset}"
##verify vm exists, and has a dhcp lease assigned to it
function setup_undercloud_vm {
# create and boot the undercloud VM only when it does not already exist
if ! virsh list --all | grep undercloud > /dev/null; then
undercloud_nets="default admin_network"
# attach the public network too when it is in the enabled network list
if [[ $enabled_network_list =~ "public_network" ]]; then
undercloud_nets+=" public_network"
# define_vm <name> <disk-bus> <disk-GB> <networks> <vcpus> <ram-MB>
define_vm undercloud hd 30 "$undercloud_nets" 4 12288
### this doesn't work for some reason I was getting hangup events so using cp instead
#virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
#2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
#2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
#2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
#error: cannot close volume undercloud.qcow2
#error: internal error: received hangup / error event on socket
#error: Reconnected to the hypervisor
local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
# resize Undercloud machine
echo "Checking if Undercloud needs to be resized..."
# parse the whole-GB size of the undercloud disk image's device
undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$undercloud_size" -lt 30 ]; then
qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
# re-check the filesystem size to confirm the resize took effect
new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
echo "Error resizing Undercloud machine, disk size is ${new_size}"
echo "Undercloud successfully resized"
echo "Skipped Undercloud resize, upstream is large enough"
echo "Found Undercloud VM, using existing VM"
# if the VM is not running update the authkeys and start it
if ! virsh list | grep undercloud > /dev/null; then
echo "Injecting ssh key to Undercloud VM"
# inject the jumphost's root pubkey for both root and stack users in the image
LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
--upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
--run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
--run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
--run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
virsh start undercloud
sleep 10 # let undercloud get started up
# get the undercloud VM IP
echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
166 while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
171 UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
173 if [ -z "$UNDERCLOUD" ]; then
174 echo "\n\nCan't get IP for Undercloud. Can Not Continue."
echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
# wait until the VM answers ping, then until ssh as root succeeds
echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
if [ "$CNT" -eq 0 ]; then
echo "Failed to contact Undercloud. Can Not Continue"
while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
if [ "$CNT" -eq 0 ]; then
echo "Failed to connect to Undercloud. Can Not Continue"
# extra space to overwrite the previous connectivity output
echo -e "${blue}\r ${reset}"
# ssh key fix for stack user
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
##Copies configuration files to the Undercloud VM and runs the undercloud
##install (NIC templates, instackenv, undercloud.conf, external network)
##Globals: SSH_OPTIONS, UNDERCLOUD, net_isolation_enabled, deploy_options_array,
##         CONFIG, LIB, virtual, admin_network_* / public_network_* settings
function configure_undercloud {
local controller_nic_template compute_nic_template
echo "Copying configuration files to Undercloud"
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
echo -e "${blue}Network Environment set for Deployment: ${reset}"
cat /tmp/network-environment.yaml
scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":
# check for ODL L3/ONOS
if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
ovs_dpdk_bridge='br-phy'
# render per-role NIC heat templates with the apex python utilities
if ! controller_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e "br-ex" -af $ip_addr_family); then
echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
if ! compute_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
# write the rendered templates into the stack user's nics/ dir on the undercloud
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
cat > nics/controller.yaml << EOF
$controller_nic_template
cat > nics/compute.yaml << EOF
$compute_nic_template
# ensure stack user on Undercloud machine has an ssh key
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
if [ "$virtual" == "TRUE" ]; then
# copy the Undercloud VM's stack user's pub key to
# root's auth keys so that Undercloud can control
# vm power on the hypervisor
ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
DEPLOY_OPTIONS+=" --libvirt-type qemu"
INSTACKENV=$CONFIG/instackenv-virt.json
# upload instackenv file to Undercloud for virtual deployment
scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
# allow stack to control power management on the hypervisor via sshkey
# only if this is a virtual deployment
if [ "$virtual" == "TRUE" ]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
while read -r line; do
stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
# copy stack's ssh key to this users authorized keys
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
# disable requiretty for sudo
# (note: /etc/sudoers is outside the quotes, but ssh joins all remaining
# arguments into the remote command line, so sed still receives the path)
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
# configure undercloud on Undercloud VM
echo "Running undercloud configuration."
echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
sed -i 's/#local_ip/local_ip/' undercloud.conf
sed -i 's/#network_gateway/network_gateway/' undercloud.conf
sed -i 's/#network_cidr/network_cidr/' undercloud.conf
sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
# we assume that packages will not need to be updated with undercloud install
# and that it will be used only to configure the undercloud
# packages updates would need to be handled manually with yum update
sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
openstack undercloud install &> apex-undercloud-install.log || {
# cat the undercloud install log incase it fails
echo "ERROR: openstack undercloud install has failed. Dumping Log:"
cat apex-undercloud-install.log
sudo systemctl restart openstack-glance-api
sudo systemctl restart openstack-nova-conductor
sudo systemctl restart openstack-nova-compute
sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
sudo systemctl restart openstack-heat-engine
sudo systemctl restart openstack-heat-api
# configure external network
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
if [[ "$public_network_vlan" != "native" ]]; then
cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${public_network_vlan}
DEVICE=vlan${public_network_vlan}
IPADDR=${public_network_provisioner_ip}
PREFIX=${public_network_cidr##*/}
OVS_BRIDGE=br-ctlplane
OVS_OPTIONS="tag=${public_network_vlan}"
ifup vlan${public_network_vlan}
if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then
ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2
ip link set up dev eth2
# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
# TODO: revisit and file a bug if necessary. This should eventually be removed
# as well as glance api problem
echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
##prepping it for deployment and launch the deploy
##Builds DEPLOY_OPTIONS from the deploy settings, prepares the overcloud
##image/flavors/nodes on the undercloud, and launches the overcloud deploy
##Globals: deploy_options_array, DEPLOY_OPTIONS, RESOURCES, SSH_OPTIONS,
##         UNDERCLOUD, ha_enabled, net_isolation_enabled, virtual, ntp_server
function undercloud_prep_overcloud_deploy {
# pick SDN-specific heat environment files and the overcloud image variant
if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_fdio.yaml"
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
SDN_IMAGE=opendaylight
if [ "${deploy_options_array['sfc']}" == 'True' ]; then
# SFC deployments require the dedicated opendaylight-sfc overcloud image
if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
SDN_IMAGE=opendaylight
elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
SDN_IMAGE=opendaylight
echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
# Make sure the correct overcloud image is available
if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
echo "Both ONOS and OpenDaylight are currently deployed from this image."
echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
echo "Copying overcloud image to Undercloud"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
# Install ovs-dpdk inside the overcloud image if it is enabled.
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
# install dpdk packages before ovs
echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat << EOF > vfio_pci.modules
exec /sbin/modprobe vfio_pci >/dev/null 2>&1
cat << EOF > uio_pci_generic.modules
exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
--upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
--run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
--run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
--run-command "yum install -y /root/dpdk_rpms/*" \
-a overcloud-full.qcow2
elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
# Set ODL version accordingly
if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
--run-command "yum -y install /root/boron/*" \
-a overcloud-full.qcow2
# Add performance deploy options if they have been set
if [ ! -z "${deploy_options_array['performance']}" ]; then
# Remove previous kernel args files per role
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt"
# Push performance options to subscript to modify per-role images as needed
for option in "${performance_options[@]}" ; do
echo -e "${blue}Setting performance option $option${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
# Build IPA kernel option ramdisks
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
/bin/cp -f /home/stack/ironic-python-agent.initramfs /root/
gunzip -c ../ironic-python-agent.initramfs | cpio -i
if [ ! -f /home/stack/Compute-kernel_params.txt ]; then
touch /home/stack/Compute-kernel_params.txt
chown stack /home/stack/Compute-kernel_params.txt
/bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt
echo "Compute params set: "
cat tmp/kernel_params.txt
/bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py
/bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc
find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs
chown stack /home/stack/Compute-ironic-python-agent.initramfs
if [ ! -f /home/stack/Controller-kernel_params.txt ]; then
touch /home/stack/Controller-kernel_params.txt
chown stack /home/stack/Controller-kernel_params.txt
/bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt
echo "Controller params set: "
cat tmp/kernel_params.txt
find . | cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs
chown stack /home/stack/Controller-ironic-python-agent.initramfs
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
# make sure ceph is installed
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
# scale compute nodes according to inventory
total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
# check if HA is enabled
if [[ "$ha_enabled" == "True" ]]; then
DEPLOY_OPTIONS+=" --control-scale 3"
compute_nodes=$((total_nodes - 3))
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
compute_nodes=$((total_nodes - 1))
if [ "$compute_nodes" -le 0 ]; then
echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
if [[ "$net_isolation_enabled" == "TRUE" ]]; then
#DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
DEPLOY_OPTIONS+=" -e network-environment.yaml"
if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
if [[ ! "$virtual" == "TRUE" ]]; then
DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
# remote prep: glance images, baremetal import, flavors, dns, deploy command
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
if [ "$debug" == 'TRUE' ]; then
LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
echo "Uploading overcloud glance images"
openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
openstack baremetal configure boot
bash -x set_perf_images.sh ${performance_roles[@]}
#if [[ -z "$virtual" ]]; then
#  openstack baremetal introspection bulk start
echo "Configuring flavors"
for flavor in baremetal control compute; do
echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
if openstack flavor list | grep \${flavor}; then
openstack flavor delete \${flavor}
openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
if ! openstack flavor list | grep \${flavor}; then
echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
echo "Configuring nameserver on ctlplane network"
for dns_server in ${dns_servers}; do
dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
echo "Executing overcloud deployment, this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
# save deploy command so it can be used for debugging
cat > deploy_command << EOF
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
if [ "$interactive" == "TRUE" ]; then
if ! prompt_user "Overcloud Deployment"; then
echo -e "${blue}INFO: User requests exit${reset}"
# run the actual overcloud deploy and dump the heat stack on failure
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
$(typeset -f debug_stack)
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
echo "Running DPDK test app on \$node"
ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
sudo dpdk_helloworld --no-pci
sudo dpdk_nic_bind -s
if [ "$debug" == 'TRUE' ]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
echo "Keystone Endpoint List:"
openstack endpoint list
echo "Keystone Service List"
openstack service list
cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
643 ##Post configuration after install
645 function configure_post_install {
646 local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
647 opnfv_attach_networks="admin_network public_network"
649 echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
651 echo -e "${blue}INFO: Configuring ssh for root to overcloud nodes...${reset}"
652 # copy host key to instack
653 scp ${SSH_OPTIONS[@]} /root/.ssh/id_rsa.pub "stack@$UNDERCLOUD":jumphost_id_rsa.pub
655 # add host key to overcloud nodes authorized keys
656 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
658 nodes=\$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
659 for node in \$nodes; do
660 cat ~/jumphost_id_rsa.pub | ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" 'cat >> ~/.ssh/authorized_keys'
664 if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
665 echo -e "${blue}INFO: Bringing up br-phy and ovs-agent for dpdk compute nodes...${reset}"
666 compute_nodes=$(undercloud_connect stack "source stackrc; nova list | grep compute | wc -l")
668 while [ "$i" -lt "$compute_nodes" ]; do
669 overcloud_connect compute${i} "sudo ifup br-phy; sudo systemctl restart neutron-openvswitch-agent"
674 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
677 echo "Configuring Neutron external network"
678 neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
679 neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
681 echo "Removing sahara endpoint and service"
682 sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
683 sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
684 openstack endpoint delete \$sahara_endpoint_id
685 openstack service delete \$sahara_service_id
687 echo "Removing swift endpoint and service"
688 swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
689 swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
690 openstack endpoint delete \$swift_endpoint_id
691 openstack service delete \$swift_service_id
693 if [ "${deploy_options_array['congress']}" == 'True' ]; then
694 for s in nova neutronv2 ceilometer cinder glancev2 keystone; do
695 openstack congress datasource create \$s "\$s" \\
696 --config username=\$OS_USERNAME \\
697 --config tenant_name=\$OS_TENANT_NAME \\
698 --config password=\$OS_PASSWORD \\
699 --config auth_url=\$OS_AUTH_URL
704 echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
705 for network in ${opnfv_attach_networks}; do
706 ovs_ip=$(find_ip ${NET_MAP[$network]})
708 if [ -n "$ovs_ip" ]; then
709 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
711 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
712 # use last IP of allocation pool
713 eval "ip_range=\${${network}_usable_ip_range}"
714 ovs_ip=${ip_range##*,}
715 eval "net_cidr=\${${network}_cidr}"
716 sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
717 sudo ip link set up ${NET_MAP[$network]}
718 tmp_ip=$(find_ip ${NET_MAP[$network]})
719 if [ -n "$tmp_ip" ]; then
720 echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
723 echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
729 # for virtual, we NAT public network through Undercloud
730 if [ "$virtual" == "TRUE" ]; then
731 if ! configure_undercloud_nat ${public_network_cidr}; then
732 echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
735 echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
739 # for sfc deployments we need the vxlan workaround
740 if [ "${deploy_options_array['sfc']}" == 'True' ]; then
741 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
744 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
745 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
746 sudo ifconfig br-int up
747 sudo ip route add 123.123.123.0/24 dev br-int
753 # Collect deployment logs
754 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
755 mkdir -p ~/deploy_logs
759 for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
760 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
761 sudo cp /var/log/messages /home/heat-admin/messages.log
762 sudo chown heat-admin /home/heat-admin/messages.log
764 scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
765 if [ "$debug" == "TRUE" ]; then
766 nova list --ip \$node
767 echo "---------------------------"
768 echo "-----/var/log/messages-----"
769 echo "---------------------------"
770 cat ~/deploy_logs/\$node.messages.log
771 echo "---------------------------"
772 echo "----------END LOG----------"
773 echo "---------------------------"
775 ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
776 sudo rm -f /home/heat-admin/messages.log
780 # Print out the undercloud IP and dashboard URL
782 echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
783 echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
789 echo -e "Usage:\n$0 [arguments] \n"
790 echo -e " -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
791 echo -e " -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
792 echo -e " -n|--net-settings : Full path to network settings file. Optional."
793 echo -e " -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
794 echo -e " -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
795 echo -e " --flat : disable Network Isolation and use a single flat network for the underlay network."
796 echo -e " --no-post-config : disable Post Install configuration."
797 echo -e " --debug : enable debug output."
798 echo -e " --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
799 echo -e " --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
800 echo -e " --virtual-ram : Amount of RAM to use per Overcloud VM in GB (defaults to 8)."
##translates the command line parameters into variables
##params: $@ the entire command line is passed
##usage: parse_cmd_line "$@"
# NOTE(review): the `parse_cmd_line` function definition line and the case
# statement scaffolding (case/esac, shift, ;;) are elided in this chunk; the
# lines below are the visible option-handling actions.
echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
echo "Use -h to display help"
# consume flags while the next argument starts with '-'
while [ "${1:0:1}" = "-" ]
-d|--deploy-settings)
DEPLOY_SETTINGS_FILE=$2
echo "Deployment Configuration file: $2"
echo "Network Settings Configuration file: $2"
echo "Using $2 as the ping site"
echo "Executing a Virtual Deployment"
# --flat disables network isolation for the underlay
net_isolation_enabled="FALSE"
echo "Underlay Network Isolation Disabled: using flat configuration"
echo "Post install configuration disabled"
echo "Enable debug output"
echo "Interactive mode enabled"
echo "Number of CPUs per VM set to $VM_CPUS"
echo "Amount of RAM per VM set to $VM_RAM"
echo "Virtual Compute nodes set to $VM_COMPUTES"
# --- argument validation (NOTE(review): exit/fi scaffolding elided here) ---
if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
elif [[ -z "$NETSETS" ]]; then
echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
# an inventory file makes no sense for virtual deployments
if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
906 if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
907 echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
# post-install configuration is not supported without network isolation
if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
# --- main deployment flow ---
# NOTE(review): the enclosing main function and several fi/exit scaffolding
# lines are elided in this chunk.
echo -e "${blue}INFO: Parsing network settings file...${reset}"
parse_network_settings
if ! configure_deps; then
echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
parse_deploy_settings
if [ "$virtual" == "TRUE" ]; then
# virtual deployment: create VMs that emulate baremetal nodes
setup_virtual_baremetal $VM_CPUS $VM_RAM
elif [ -n "$INVENTORY_FILE" ]; then
undercloud_prep_overcloud_deploy
if [ "$post_config" == "TRUE" ]; then
if ! configure_post_install; then
echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
# ONOS deployments need the external gateway MAC updated after install
if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
echo -e "${red}ERROR:ONOS Post Install Configuration Failed, Exiting.${reset}"
echo -e "${blue}INFO: ONOS Post Install Configuration Complete${reset}"