3 # Deploy script to install provisioning server for OPNFV Apex
4 # author: Dan Radez (dradez@redhat.com)
5 # author: Tim Rozet (trozet@redhat.com)
7 # Based on RDO Manager http://www.rdoproject.org
10 # - Supports 3 or 4 network interface configuration
11 # - Target system must be RPM based
12 # - Provisioned nodes are expected to have the following order of network connections (note: not all have to exist, but the order is maintained):
14 # eth1- private network (+storage network in 3 NIC config)
15 # eth2- public network
16 # eth3- storage network
17 # - script assumes /24 subnet mask
32 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
34 RESOURCES=/var/opt/opnfv/stack
38 ##verify internet connectivity
40 function verify_internet {
41 if ping -c 2 8.8.8.8 > /dev/null; then
42 if ping -c 2 www.google.com > /dev/null; then
43 echo "${blue}Internet connectivity detected${reset}"
46 echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
50 echo "${red}No internet connectivity detected${reset}"
55 ##download dependencies if missing and configure host
57 function configure_deps {
58 if ! verify_internet; then
59 echo "${red}Will not download dependencies${reset}"
65 # ensure brbm networks are configured
66 systemctl start openvswitch
67 ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
68 virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
69 virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm
70 ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1
71 virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml
72 virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1
74 # ensure storage pool exists and is started
75 virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
76 virsh pool-list | grep -Eo "default\s+active" > /dev/null || virsh pool-start default
78 if virsh net-list | grep default > /dev/null; then
79 num_ints_same_subnet=$(ip addr show | grep "inet 192.168.122" | wc -l)
80 if [ "$num_ints_same_subnet" -gt 1 ]; then
81 virsh net-destroy default
82 ##go edit /etc/libvirt/qemu/networks/default.xml
83 sed -i 's/192.168.122/192.168.123/g' /etc/libvirt/qemu/networks/default.xml
84 sed -i 's/192.168.122/192.168.123/g' instackenv-virt.json
86 virsh net-start default
87 virsh net-autostart default
91 if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
92 echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
93 Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
99 if ! lsmod | grep kvm > /dev/null; then
100 echo "${red}kvm kernel modules not loaded!${reset}"
105 if [ ! -e ~/.ssh/id_rsa.pub ]; then
106 ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
109 echo "${blue}All dependencies installed and running${reset}"
112 ##verify vm exists, an has a dhcp lease assigned to it
114 function setup_instack_vm {
115 if ! virsh list --all | grep instack > /dev/null; then
116 #virsh vol-create default instack.qcow2.xml
117 virsh define $CONFIG/instack.xml
119 #Upload instack image
120 #virsh vol-create default --file instack.qcow2.xml
121 virsh vol-create-as default instack.qcow2 30G --format qcow2
122 virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2
124 sleep 1 # this was to let the copy settle, needed with vol-upload?
127 echo "Found Instack VM, using existing VM"
130 # if the VM is not running update the authkeys and start it
131 if ! virsh list | grep instack > /dev/null; then
132 echo "Injecting ssh key to instack VM"
133 virt-customize -c qemu:///system -d instack --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
134 --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
135 --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
136 --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
140 sleep 3 # let DHCP happen
143 echo -n "${blue}Waiting for instack's dhcp address${reset}"
144 while ! grep instack /var/lib/libvirt/dnsmasq/default.leases > /dev/null && [ $CNT -gt 0 ]; do
150 # get the instack VM IP
151 UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
154 echo -en "${blue}\rValidating instack VM connectivity${reset}"
155 while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
161 while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
167 # extra space to overwrite the previous connectivity output
168 echo -e "${blue}\rInstack VM has IP $UNDERCLOUD ${reset}"
170 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth1 | grep 192.0.2.1 > /dev/null; then ip a a 192.0.2.1/24 dev eth1; fi"
172 #add the instack brbm1 interface
173 virsh attach-interface --domain instack --type network --source brbm1 --model rtl8139 --config --live
175 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep 192.168.37.1 > /dev/null; then ip a a 192.168.37.252/24 dev eth2; ip link set up dev eth2; fi"
177 # ssh key fix for stack user
178 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
181 ##Create virtual nodes in virsh
183 function setup_virtual_baremetal {
184 for i in $(seq 0 $vm_index); do
185 if ! virsh list --all | grep baremetalbrbm_brbm1_${i} > /dev/null; then
186 if [ ! -e $CONFIG/baremetalbrbm_brbm1_${i}.xml ]; then
187 define_virtual_node baremetalbrbm_brbm1_${i}
189 virsh define $CONFIG/baremetalbrbm_brbm1_${i}.xml
191 echo "Found Baremetal ${i} VM, using existing VM"
193 virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
197 ##Copy over the glance images and instack json file
199 function copy_materials {
202 echo "Copying configuration file and disk images to instack"
203 scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.initramfs "stack@$UNDERCLOUD":
204 scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.kernel "stack@$UNDERCLOUD":
205 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.initramfs "stack@$UNDERCLOUD":
206 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.kernel "stack@$UNDERCLOUD":
207 scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.vmlinuz "stack@$UNDERCLOUD":
208 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
209 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
210 scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
211 scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
212 scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":
213 scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD":
216 # when OpenDaylight lands in upstream RDO manager this can be removed
217 # apply the opendaylight patch
218 scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.patch "root@$UNDERCLOUD":
219 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cd /usr/share/openstack-tripleo-heat-templates/; patch -Np1 < /root/opendaylight.patch"
222 # ensure stack user on instack machine has an ssh key
223 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
225 if [ $virtual == "TRUE" ]; then
226 # fix MACs to match new setup
227 for i in $(seq 0 $vm_index); do
228 pyscript="import json
229 data = json.load(open('$CONFIG/instackenv-virt.json'))
230 print data['nodes'][$i]['mac'][0]"
232 old_mac=$(python -c "$pyscript")
233 new_mac=$(virsh dumpxml baremetalbrbm_brbm1_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
234 # this doesn't work with multiple vnics on the vms
235 #if [ "$old_mac" != "$new_mac" ]; then
236 # echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
237 # sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json
241 # upload virt json file
242 scp ${SSH_OPTIONS[@]} $CONFIG/instackenv-virt.json "stack@$UNDERCLOUD":instackenv.json
244 # allow stack to control power management on the hypervisor via sshkey
245 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
246 while read -r line; do
247 stack_key=\${stack_key}\\\\\\\\n\${line}
248 done < <(cat ~/.ssh/id_rsa)
249 stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
250 sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
252 DEPLOY_OPTIONS+="--libvirt-type qemu"
254 scp ${SSH_OPTIONS[@]} $CONFIG/instackenv.json "stack@$UNDERCLOUD":
258 # copy stack's ssh key to this users authorized keys
259 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
262 ##preping it for deployment and launch the deploy
264 function undercloud_prep_overcloud_deploy {
265 # check if HA is enabled
266 if [ $ha_enabled == "TRUE" ]; then
267 DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2"
268 DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
269 DEPLOY_OPTIONS+=" --ntp-server pool.ntp.org"
272 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
275 echo "Uploading overcloud glance images"
276 openstack overcloud image upload
277 echo "Configuring undercloud and discovering nodes"
278 openstack baremetal import --json instackenv.json
279 openstack baremetal configure boot
280 openstack baremetal introspection bulk start
281 echo "Configuring flavors"
282 openstack flavor list | grep baremetal || openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 baremetal
283 openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
284 echo "Configuring nameserver on ctlplane network"
285 neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
286 echo "Executing overcloud deployment, this should run for an extended period without output."
287 sleep 60 #wait for Hypervisor stats to check-in to nova
288 openstack overcloud deploy --templates $DEPLOY_OPTIONS -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml -e network-environment.yaml -e opendaylight.yaml
294 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
295 echo -e "\nUsage:\n$0 [arguments] \n"
296 echo -e "\n -c|--config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: --config /opt/myinventory.yml \n"
297 echo -e "\n -r|--resources : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: --config /opt/myinventory.yml \n"
298 echo -e "\n -v|--virtual : Virtualize compute nodes instead of using baremetal. \n"
299 echo -e "\n -p|--ping-site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n"
300 echo -e "\n -n|--no-ha : disable High Availablility deploymnet scheme, this assumes a single controller and single compute node \n"
303 ##translates the command line parameters into variables
304 ##params: $@ the entire command line is passed
305 ##usage: parse_cmd_line "$@"
307 echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
308 echo "Use -h to display help"
311 while [ "${1:0:1}" = "-" ]
353 if [ $virtual == "TRUE" ]; then
354 setup_virtual_baremetal
357 undercloud_prep_overcloud_deploy