3 # Deploy script to install provisioning server for OPNFV Apex
4 # author: Dan Radez (dradez@redhat.com)
5 # author: Tim Rozet (trozet@redhat.com)
7 # Based on RDO Manager http://www.rdoproject.org
10 # - Supports 3 or 4 network interface configuration
11 # - Target system must be RPM based
# - Provisioned nodes expected to have the following order of network connections (note: not all have to exist, but order is maintained):
14 # eth1- private network (+storage network in 3 NIC config)
15 # eth2- public network
16 # eth3- storage network
17 # - script assumes /24 subnet mask
# Shared ssh/scp options: the instack and baremetal VMs are rebuilt often and
# their host keys change, so host-key checking is disabled and known-hosts
# files are discarded; LogLevel=error silences the resulting warnings.
SSH_OPTIONS=(
  -o StrictHostKeyChecking=no
  -o GlobalKnownHostsFile=/dev/null
  -o UserKnownHostsFile=/dev/null
  -o LogLevel=error
)

# Directory holding the prebuilt disk images and deploy artifacts
RESOURCES=/var/opt/opnfv/stack
##verify internet connectivity
##returns 0 when both raw IP connectivity and DNS resolution work
# NOTE(review): the else/fi/return lines of this function are not visible in
# this excerpt; comments below describe the apparent branch structure.
function verify_internet {
  # Check raw IP reachability first, then name resolution, so the messages
  # below can distinguish "no network" from "network up but DNS broken".
  if ping -c 2 8.8.8.8 > /dev/null; then
    if ping -c 2 www.google.com > /dev/null; then
      echo "${blue}Internet connectivity detected${reset}"
      # presumably the else branch of the DNS check — TODO confirm
      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
      # presumably the else branch of the raw-IP check — TODO confirm
      echo "${red}No internet connectivity detected${reset}"
##download dependencies if missing and configure host
##params: none
# NOTE(review): several fi/else lines of this function are not visible in
# this excerpt.
function configure_deps {
  # warn-only: without internet we proceed with whatever is already installed
  if ! verify_internet; then
    echo "${red}Will not download dependencies${reset}"

  # ensure brbm network is configured
  systemctl start openvswitch
  ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
  virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
  virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm

  # ensure storage pool exists and is started
  virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
  virsh pool-list | grep -Eo "default\s+active" > /dev/null || virsh pool-start default

  # If something else on the host already uses 192.168.122.0/24, move the
  # libvirt default network to 192.168.123.0/24 to avoid a subnet clash.
  if virsh net-list | grep default > /dev/null; then
    num_ints_same_subnet=$(ip addr show | grep "inet 192.168.122" | wc -l)
    if [ "$num_ints_same_subnet" -gt 1 ]; then
      virsh net-destroy default
      ##go edit /etc/libvirt/qemu/networks/default.xml
      sed -i 's/192.168.122/192.168.123/g' /etc/libvirt/qemu/networks/default.xml
      # keep the virtual-node inventory in sync with the renumbered subnet
      sed -i 's/192.168.122/192.168.123/g' instackenv-virt.json
      virsh net-start default
      virsh net-autostart default

  # warn if hardware virtualization (Intel VT-x / AMD-V) is not exposed
  # NOTE(review): egrep is deprecated; grep -E is the modern spelling
  if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
    echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
Are you sure you have enabled vmx in your bios or hypervisor?${reset}"

  if ! lsmod | grep kvm > /dev/null; then
    echo "${red}kvm kernel modules not loaded!${reset}"

  # generate an ssh keypair for the current user if one does not exist;
  # it is later injected into the instack VM for password-less access
  if [ ! -e ~/.ssh/id_rsa.pub ]; then
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa

  echo "${blue}All dependencies installed and running${reset}"
##verify vm exists, and has a dhcp lease assigned to it
##params: none
# NOTE(review): several fi/done lines and the CNT retry-counter setup are not
# visible in this excerpt.
function setup_instack_vm {
  # Define the instack VM and upload its disk image only on first run
  if ! virsh list --all | grep instack > /dev/null; then
    #virsh vol-create default instack.qcow2.xml
    virsh define $CONFIG/instack.xml

    #Upload instack image
    #virsh vol-create default --file instack.qcow2.xml
    virsh vol-create-as default instack.qcow2 30G --format qcow2
    virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2

    sleep 1 # this was to let the copy settle, needed with vol-upload?
    echo "Found Instack VM, using existing VM"

  # if the VM is not running update the authkeys and start it
  if ! virsh list | grep instack > /dev/null; then
    echo "Injecting ssh key to instack VM"
    # virt-customize edits the offline disk image: install root's public key
    # and copy it to the stack user so both accounts accept our ssh logins
    virt-customize -c qemu:///system -d instack --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
    --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
    --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
    --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"

  sleep 3 # let DHCP happen

  echo -n "${blue}Waiting for instack's dhcp address${reset}"
  # poll the libvirt dnsmasq lease file until instack appears; CNT is a
  # countdown retry limit initialized outside this excerpt — TODO confirm
  while ! grep instack /var/lib/libvirt/dnsmasq/default.leases > /dev/null && [ $CNT -gt 0 ]; do

  # get the instack VM IP
  UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)

  echo -en "${blue}\rValidating instack VM connectivity${reset}"
  # wait for ICMP reachability, then for sshd to accept root logins
  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do

  # extra space to overwrite the previous connectivity output
  echo -e "${blue}\rInstack VM has IP $UNDERCLOUD ${reset}"

  # ensure the undercloud provisioning address 192.0.2.1/24 exists on eth1
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth1 | grep 192.0.2.1 > /dev/null; then ip a a 192.0.2.1/24 dev eth1; fi"
  # ssh key fix for stack user
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
##Create virtual nodes in virsh
##params: none; vm_index (node count - 1) is set outside this excerpt
# NOTE(review): fi/done lines of this function are not visible in this
# excerpt.
function setup_virtual_baremetal {
  for i in $(seq 0 $vm_index); do
    # define the VM only if virsh does not already know it; generate its
    # domain XML first when no definition file exists
    if ! virsh list --all | grep baremetalbrbm_${i} > /dev/null; then
      if [ ! -e $CONFIG/baremetalbrbm_${i}.xml ]; then
        define_virtual_node baremetalbrbm_${i}
      virsh define $CONFIG/baremetalbrbm_${i}.xml
      echo "Found Baremetal ${i} VM, using existing VM"
    # ensure each node has a 40G backing volume in the default pool
    virsh vol-list default | grep baremetalbrbm_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_${i}.qcow2 40G --format qcow2
##Copy over the glance images and instack json file
##params: none; requires $UNDERCLOUD, $RESOURCES, $CONFIG, $virtual set
# NOTE(review): several fi/done lines and the heredoc terminator are not
# visible in this excerpt.
function copy_materials {

  echo "Copying configuration file and disk images to instack"
  # push deploy/introspection ramdisks and the overcloud image set into the
  # stack user's home directory on the undercloud
  scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.initramfs "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.kernel "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.initramfs "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.kernel "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.vmlinuz "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
  scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":

  # when OpenDaylight lands in upstream RDO manager this can be removed
  # apply the opendaylight patch
  scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.patch "root@$UNDERCLOUD":
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cd /usr/share/openstack-tripleo-heat-templates/; patch -Np1 < /root/opendaylight.patch"

  # ensure stack user on instack machine has an ssh key
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"

  if [ $virtual == "TRUE" ]; then
    # fix MACs to match new setup
    for i in $(seq 0 $vm_index); do
      # python snippet reads the MAC recorded for node $i in the inventory;
      # the embedded string must stay verbatim (print statement implies
      # python 2 — TODO confirm)
      pyscript="import json
data = json.load(open('$CONFIG/instackenv-virt.json'))
print data['nodes'][$i]['mac'][0]"

      old_mac=$(python -c "$pyscript")
      # actual MAC assigned by libvirt to the node's first NIC
      new_mac=$(virsh dumpxml baremetalbrbm_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
      if [ "$old_mac" != "$new_mac" ]; then
        echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
        sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json

    # upload virt json file
    scp ${SSH_OPTIONS[@]} $CONFIG/instackenv-virt.json "stack@$UNDERCLOUD":instackenv.json

    # allow stack to control power management on the hypervisor via sshkey
    # NOTE(review): the heredoc below runs on the undercloud VM; its content
    # is sent verbatim, so nothing may be inserted inside it. It flattens the
    # stack user's private key to one line and splices it into instackenv.json.
    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
while read -r line; do
stack_key=\${stack_key}\\\\\\\\n\${line}
done < <(cat ~/.ssh/id_rsa)
stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
  # virtual nodes are qemu guests, so force the libvirt driver type
  DEPLOY_OPTIONS+="--libvirt-type qemu"
  # baremetal path: upload the user-supplied inventory as-is
  scp ${SSH_OPTIONS[@]} $CONFIG/instackenv.json "stack@$UNDERCLOUD":

  # copy stack's ssh key to this users authorized keys
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
##prepping it for deployment and launch the deploy
##params: none; requires $UNDERCLOUD, $ha_enabled, $DEPLOY_OPTIONS set
# NOTE(review): the fi and the heredoc terminator are not visible in this
# excerpt.
function undercloud_prep_overcloud_deploy {
  # check if HA is enabled
  if [ $ha_enabled == "TRUE" ]; then
    DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2"
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
    DEPLOY_OPTIONS+=" --ntp-server pool.ntp.org"

  # NOTE(review): the heredoc below runs on the undercloud VM as the stack
  # user. The unquoted EOI delimiter means $DEPLOY_OPTIONS is expanded
  # locally before being sent; escaped \$ references expand remotely.
  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
echo "Uploading overcloud glance images"
openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
openstack baremetal configure boot
openstack baremetal introspection bulk start
echo "Configuring flavors"
openstack flavor list | grep baremetal || openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 baremetal
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
echo "Configuring nameserver on ctlplane network"
neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
echo "Executing overcloud deployment, this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
openstack overcloud deploy --templates $DEPLOY_OPTIONS -e opendaylight.yaml
# Usage/help text. Fixes: the -r|--resources description was a copy-paste of
# the -c|--config text; the -p example showed wrong flag spellings
# (-virtual/-ping_site instead of --virtual/--ping-site); typos in the -n
# line ("Availablility deploymnet").
# NOTE(review): the enclosing usage-function definition is outside this
# excerpt.
echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
echo -e "\nUsage:\n$0 [arguments] \n"
echo -e "\n   -c|--config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: --config /opt/myinventory.yml \n"
echo -e "\n   -r|--resources : Full path of the directory containing the disk images and deploy artifacts. Optional. Defaults to /var/opt/opnfv/stack. Example: --resources /var/opt/opnfv/stack \n"
echo -e "\n   -v|--virtual : Virtualize compute nodes instead of using baremetal. \n"
echo -e "\n   -p|--ping-site : site to use to verify IP connectivity from the VM when --virtual is used. Format: --ping-site www.blah.com \n"
echo -e "\n   -n|--no-ha : disable High Availability deployment scheme, this assumes a single controller and single compute node \n"
##translates the command line parameters into variables
##params: $@ the entire command line is passed
##usage: parse_cmd_line() "$@"
295 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
296 echo "Use -h to display help"
299 while [ "${1:0:1}" = "-" ]
# Top-level deployment flow (excerpt; the closing fi is not visible here):
# create the virtual baremetal nodes only in --virtual mode, then launch the
# overcloud deployment in both modes.
if [ $virtual == "TRUE" ]; then
  setup_virtual_baremetal
undercloud_prep_overcloud_deploy