3 # Deploy script to install provisioning server for OPNFV Apex
4 # author: Dan Radez (dradez@redhat.com)
5 # author: Tim Rozet (trozet@redhat.com)
7 # Based on RDO Manager http://www.rdoproject.org
10 # - Supports 3 or 4 network interface configuration
11 # - Target system must be RPM based
12 # - Provisioned nodes expected to have the following order of network connections (note: not all have to exist, but order is maintained):
14 # eth1- private network (+storage network in 3 NIC config)
15 # eth2- public network
16 # eth3- storage network
17 # - script assumes /24 subnet mask
32 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
34 RESOURCES=/var/opt/opnfv/stack
36 INSTACKENV=$CONFIG/instackenv.json
37 NETENV=$CONFIG/network-environment.yaml
40 ##verify internet connectivity
42 function verify_internet {
43 if ping -c 2 8.8.8.8 > /dev/null; then
44 if ping -c 2 www.google.com > /dev/null; then
45 echo "${blue}Internet connectivity detected${reset}"
48 echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
52 echo "${red}No internet connectivity detected${reset}"
57 ##download dependencies if missing and configure host
59 function configure_deps {
60 if ! verify_internet; then
61 echo "${red}Will not download dependencies${reset}"
67 # ensure brbm networks are configured
68 systemctl start openvswitch
69 ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
70 virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
71 virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm
72 ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1
73 virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml
74 virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1
76 # ensure storage pool exists and is started
77 virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
78 virsh pool-list | grep -Eo "default\s+active" > /dev/null || virsh pool-start default
80 if virsh net-list | grep default > /dev/null; then
81 num_ints_same_subnet=$(ip addr show | grep "inet 192.168.122" | wc -l)
82 if [ "$num_ints_same_subnet" -gt 1 ]; then
83 virsh net-destroy default
84 ##go edit /etc/libvirt/qemu/networks/default.xml
85 sed -i 's/192.168.122/192.168.123/g' /etc/libvirt/qemu/networks/default.xml
86 sed -i 's/192.168.122/192.168.123/g' instackenv-virt.json
88 virsh net-start default
89 virsh net-autostart default
93 if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
94 echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
95 Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
101 if ! lsmod | grep kvm > /dev/null; then
102 echo "${red}kvm kernel modules not loaded!${reset}"
107 if [ ! -e ~/.ssh/id_rsa.pub ]; then
108 ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
111 echo "${blue}All dependencies installed and running${reset}"
114 ##verify vm exists, and has a dhcp lease assigned to it
116 function setup_instack_vm {
117 if ! virsh list --all | grep instack > /dev/null; then
118 #virsh vol-create default instack.qcow2.xml
119 virsh define $CONFIG/instack.xml
121 #Upload instack image
122 #virsh vol-create default --file instack.qcow2.xml
123 virsh vol-create-as default instack.qcow2 30G --format qcow2
124 virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2
126 sleep 1 # this was to let the copy settle, needed with vol-upload?
129 echo "Found Instack VM, using existing VM"
132 # if the VM is not running update the authkeys and start it
133 if ! virsh list | grep instack > /dev/null; then
134 echo "Injecting ssh key to instack VM"
135 virt-customize -c qemu:///system -d instack --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
136 --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
137 --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
138 --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
142 sleep 3 # let DHCP happen
145 echo -n "${blue}Waiting for instack's dhcp address${reset}"
146 while ! grep instack /var/lib/libvirt/dnsmasq/default.leases > /dev/null && [ $CNT -gt 0 ]; do
152 # get the instack VM IP
153 UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
156 echo -en "${blue}\rValidating instack VM connectivity${reset}"
157 while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
163 while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
169 # extra space to overwrite the previous connectivity output
170 echo -e "${blue}\rInstack VM has IP $UNDERCLOUD ${reset}"
172 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth1 | grep 192.0.2.1 > /dev/null; then ip a a 192.0.2.1/24 dev eth1; fi"
174 #add the instack brbm1 interface
175 virsh attach-interface --domain instack --type network --source brbm1 --model rtl8139 --config --live
177 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep 192.168.37.1 > /dev/null; then ip a a 192.168.37.1/24 dev eth2; ip link set up dev eth2; fi"
179 # ssh key fix for stack user
180 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
183 ##Create virtual nodes in virsh
185 function setup_virtual_baremetal {
186 for i in $(seq 0 $vm_index); do
187 if ! virsh list --all | grep baremetalbrbm_brbm1_${i} > /dev/null; then
188 if [ ! -e $CONFIG/baremetalbrbm_brbm1_${i}.xml ]; then
189 define_virtual_node baremetalbrbm_brbm1_${i}
191 virsh define $CONFIG/baremetalbrbm_brbm1_${i}.xml
193 echo "Found Baremetal ${i} VM, using existing VM"
195 virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
199 ##Copy over the glance images and instack json file
201 function copy_materials {
204 echo "Copying configuration file and disk images to instack"
205 scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.initramfs "stack@$UNDERCLOUD":
206 scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.kernel "stack@$UNDERCLOUD":
207 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.initramfs "stack@$UNDERCLOUD":
208 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.kernel "stack@$UNDERCLOUD":
209 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.vmlinuz "stack@$UNDERCLOUD":
210 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
211 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
212 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
213 scp ${SSH_OPTIONS[@]} $NETENV "stack@$UNDERCLOUD":
214 scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":
215 scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD":
218 # when OpenDaylight lands in upstream RDO manager this can be removed
219 # apply the opendaylight patch
220 scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.patch "root@$UNDERCLOUD":
221 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cd /usr/share/openstack-tripleo-heat-templates/; patch -Np1 < /root/opendaylight.patch"
224 # ensure stack user on instack machine has an ssh key
225 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
227 if [ $virtual == "TRUE" ]; then
228 # fix MACs to match new setup
229 for i in $(seq 0 $vm_index); do
230 pyscript="import json
231 data = json.load(open('$CONFIG/instackenv-virt.json'))
232 print data['nodes'][$i]['mac'][0]"
234 old_mac=$(python -c "$pyscript")
235 new_mac=$(virsh dumpxml baremetalbrbm_brbm1_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
236 # this doesn't work with multiple vnics on the vms
237 #if [ "$old_mac" != "$new_mac" ]; then
238 # echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
239 # sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json
243 DEPLOY_OPTIONS+="--libvirt-type qemu"
244 INSTACKENV=$CONFIG/instackenv-virt.json
245 NETENV=$CONFIG/network-environment.yaml
248 # upload instackenv file to Instack
249 scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
252 # allow stack to control power management on the hypervisor via sshkey
253 # only if this is a virtual deployment
254 if [ $virtual == "TRUE" ]; then
255 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
256 while read -r line; do
257 stack_key=\${stack_key}\\\\\\\\n\${line}
258 done < <(cat ~/.ssh/id_rsa)
259 stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
260 sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
264 # copy stack's ssh key to this users authorized keys
265 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
268 ##prepping it for deployment and launching the deploy
270 function undercloud_prep_overcloud_deploy {
271 # check if HA is enabled
272 if [ $ha_enabled == "TRUE" ]; then
273 DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2"
274 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
275 DEPLOY_OPTIONS+=" --ntp-server pool.ntp.org"
278 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
281 echo "Uploading overcloud glance images"
282 openstack overcloud image upload
283 echo "Configuring undercloud and discovering nodes"
284 openstack baremetal import --json instackenv.json
285 openstack baremetal configure boot
286 openstack baremetal introspection bulk start
287 echo "Configuring flavors"
288 openstack flavor list | grep baremetal || openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 baremetal
289 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
290 echo "Configuring nameserver on ctlplane network"
291 neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
292 echo "Executing overcloud deployment, this should run for an extended period without output."
293 sleep 60 #wait for Hypervisor stats to check-in to nova
294 openstack overcloud deploy --templates $DEPLOY_OPTIONS -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml -e network-environment.yaml -e opendaylight.yaml
300 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
301 echo -e "\nUsage:\n$0 [arguments] \n"
302 echo -e "\n -c|--config : Directory to configuration files. Optional. Defaults to /var/opt/opnfv/ \n"
303 echo -e "\n -i|--instackenv : Full path to instack environment file. Optional. Defaults to \$CONFIG/instackenv.json \n"
304 echo -e "\n -n|--netenv : Full path to network environment file. Optional. Defaults to \$CONFIG/network-environment.json \n"
305 echo -e "\n -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8 \n"
306 echo -e "\n -r|--resources : Directory to deployment resources. Optional. Defaults to /var/opt/opnfv/stack \n"
307 echo -e "\n -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
308 echo -e "\n --no-ha : disable High Availablility deployment scheme, this assumes a single controller and single compute node \n"
311 ##translates the command line parameters into variables
312 ##params: $@ the entire command line is passed
313 ##usage: parse_cmd_line() "$@"
315 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
316 echo "Use -h to display help"
319 while [ "${1:0:1}" = "-" ]
328 echo "Deployment Configuration Directory Overridden to: $2"
341 echo "Using $2 as the ping site"
346 echo "Deployment Resources Directory Overridden to: $2"
351 echo "Executing a Virtualized Deployment"
356 echo "HA Deployment Disabled"
373 if [ $virtual == "TRUE" ]; then
374 setup_virtual_baremetal
377 undercloud_prep_overcloud_deploy