# Adding SDNVPN support
# [apex.git] / build / instack.sh
#!/usr/bin/env bash
##############################################################################
# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# This script uses bash-only features (arrays, 'declare -i', '+='), so the
# shebang must request bash, not /bin/sh.

# Abort on the first failing command; an image build must not continue past
# errors.
set -e
# CNT is an integer retry counter; -i makes assignments like CNT=CNT-1
# evaluate arithmetically.
declare -i CNT
12
# Image sources: default to the locally mirrored stable images.
#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv
rdo_images_uri=file:///stable-images
onos_artifacts_uri=file:///stable-images/onos

# Highest index of the baremetal VMs created below (seq 0..vm_index).
vm_index=4
# OpenStack release the undercloud/overcloud are built from.
RDO_RELEASE=liberty
# Non-interactive ssh: never prompt for, or record, host keys.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
# Logical networks known to Apex deployments.
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
21
# check for dependency packages needed by the build and install any missing
for pkg in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
    if ! rpm -q "$pkg" > /dev/null; then
        sudo yum install -y "$pkg"
    fi
done
28
# RDO Manager expects a stack user to exist, this checks for one
# and creates it if you are root
if ! id stack > /dev/null 2>&1; then
    sudo useradd stack
    # only tee needs root: a 'sudo' on echo (left of the pipe) is useless
    # because the pipe itself runs as the invoking user anyway
    echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
    echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
    sudo chmod 0440 /etc/sudoers.d/stack
    echo 'Added user stack'
fi
38
# ensure that I can ssh as the stack user
# -F: the public key contains regex metacharacters ('+', '/'), so match it
# as a fixed string; -q: we only need the exit status
if ! sudo grep -qF "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then
    if ! sudo test -d /home/stack/.ssh ; then
        sudo mkdir /home/stack/.ssh
        sudo chown stack:stack /home/stack/.ssh
        sudo chmod 700 /home/stack/.ssh
    fi
    # append the invoking user's public key; ~ expands before sudo runs, so
    # this reads the same file the grep above checked (the old
    # USER=$(whoami) sudo sh -c "... ~$USER ..." form expanded $USER in the
    # current shell anyway, making the env prefix dead code)
    cat ~/.ssh/id_rsa.pub | sudo tee -a /home/stack/.ssh/authorized_keys > /dev/null
    sudo chown stack:stack /home/stack/.ssh/authorized_keys
fi
49
# clean up the stack user's previously built instack disk images
ssh -T "${SSH_OPTIONS[@]}" stack@localhost "rm -f instack*.qcow2"
52
# Yum repo setup for building the undercloud
# Both modes pull from current-passed-ci instead of the release repos; the
# only difference between them is which delorean.repo branch is fetched.
delorean_repo=
if [ "$1" == "-master" ]; then
    delorean_repo=http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
elif ! rpm -q rdo-release > /dev/null; then
    delorean_repo=http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
fi
# if rdo-release is already installed (and we are not building master),
# leave the repo configuration alone
if [ -n "$delorean_repo" ]; then
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo "$delorean_repo"
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
fi
69
# install a package when a (possibly differently named) rpm is not present
# $1 - rpm name to check for
# $2 - package to install when missing (defaults to $1)
install_pkg_if_missing() {
    local check=$1
    local pkg=${2:-$1}
    if ! rpm -q "$check" > /dev/null; then
        sudo yum install -y "$pkg"
    fi
}

# ensure the undercloud package is installed so we can build the undercloud
# (python-tripleoclient is installed to satisfy instack-undercloud, per the
# original check/install pairing — TODO confirm the provider relationship)
install_pkg_if_missing instack-undercloud python-tripleoclient
# ensure openvswitch is installed
install_pkg_if_missing openvswitch
# ensure libvirt is installed
install_pkg_if_missing libvirt-daemon-kvm
84
85 # clean this up incase it's there
86 sudo rm -f /tmp/instack.answers
87
88 # ensure that no previous undercloud VMs are running
89 sudo ../ci/clean.sh
90 # and rebuild the bare undercloud VMs
91 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
92 set -e
93 NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
94 EOI
95
# let dhcp happen so we can get the ip
# just wait instead of checking until we see an address
# because there may be a previous lease that needs
# to be cleaned up
sleep 5

# get the undercloud ip address from the libvirt default-network leases
UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
if [ -z "$UNDERCLOUD" ]; then
  # if not found then dnsmasq may be using leasefile-ro; fall back to
  # resolving the VM's MAC address through the host ARP table
  instack_mac=$(ssh -T "${SSH_OPTIONS[@]}" stack@localhost "virsh domiflist instack" | grep default | \
                grep -Eo "([0-9a-f]{2}:){5}[0-9a-f]{2}")
  UNDERCLOUD=$(/usr/sbin/arp -e | grep "${instack_mac}" | awk '{print $1}')
fi

if [ -z "$UNDERCLOUD" ]; then
  # -e so the \n escapes actually print as blank lines
  echo -e "\n\nNever got IP for Instack. Can Not Continue."
  exit 1
fi
echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
119
# ensure that we can ssh to the undercloud, retrying for up to ~30s
CNT=10
while ! ssh -T "${SSH_OPTIONS[@]}" "root@$UNDERCLOUD" "echo ''" > /dev/null && [ $CNT -gt 0 ]; do
    echo -n "."
    sleep 3
    CNT=$((CNT-1))
done
# fail when the retries were exhausted and the undercloud is still
# unreachable (one final attempt covers a success on the last iteration)
if [ $CNT -eq 0 ] && ! ssh -T "${SSH_OPTIONS[@]}" "root@$UNDERCLOUD" "echo ''" > /dev/null; then
    echo "Failed to ssh to the undercloud at $UNDERCLOUD. Can Not Continue."
    exit 1
fi
128
# yum repo, triple-o package and ssh key setup for the undercloud
ssh -T "${SSH_OPTIONS[@]}" "root@$UNDERCLOUD" <<EOI
set -e

if ! rpm -q epel-release > /dev/null; then
    # -y is required: this runs non-interactively over ssh, so a yum
    # confirmation prompt would abort the install and trip set -e
    yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
fi

yum -y install yum-plugin-priorities
curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo

# give the stack user the same key-based access as root
cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
chown stack:stack /home/stack/.ssh/authorized_keys
EOI
144
# copy instackenv file for future virt deployments
mkdir -p stack
scp "${SSH_OPTIONS[@]}" stack@"$UNDERCLOUD":instackenv.json stack/instackenv.json
148
# make a copy of instack VM's definitions, and disk image
# it must be stopped to make a copy of its disk image
# NB: everything between the EOI markers runs remotely as 'stack'. '\$'
# defers expansion to the remote shell; a bare '$' expands locally before
# the heredoc is sent. The counter below must use '\$' or the local shell
# substitutes it and the remote timeout never counts down.
ssh -T "${SSH_OPTIONS[@]}" stack@localhost <<EOI
set -e
echo "Shutting down instack to gather configs"
virsh shutdown instack

echo "Waiting for instack VM to shutdown"
CNT=20
while virsh list | grep instack > /dev/null && [ \$CNT -gt 0 ]; do
    echo -n "."
    sleep 5
    CNT=\$((CNT-1))
done
if virsh list | grep instack > /dev/null; then
    echo "instack failed to shutdown for copy"
    exit 1
fi

echo \$'\nGenerating libvirt configuration'
for i in \$(seq 0 $vm_index); do
  virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
done

virsh dumpxml instack > instack.xml
virsh net-dumpxml brbm > brbm-net.xml
virsh net-dumpxml brbm1 > brbm1-net.xml
virsh net-dumpxml brbm2 > brbm2-net.xml
virsh net-dumpxml brbm3 > brbm3-net.xml
virsh pool-dumpxml default > default-pool.xml
EOI
180
# copy off the instack artifacts
echo "Copying instack files to build directory"
for idx in $(seq 0 $vm_index); do
  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${idx}.xml .
done

# the VM definition, the four network definitions and the storage pool
for conf in instack.xml brbm-net.xml brbm1-net.xml brbm2-net.xml brbm3-net.xml default-pool.xml; do
  scp ${SSH_OPTIONS[@]} stack@localhost:${conf} .
done
193
# pull down the built images
echo "Copying overcloud resources"
IMAGES="overcloud-full.tar"
IMAGES+=" undercloud.qcow2"

for i in $IMAGES; do
  # download a prebuilt image when the local copy is missing or its md5 no
  # longer matches the upstream checksum
  if [ ! -f "stack/$i" ] || [ "$(curl -L "$rdo_images_uri/${i}.md5" | awk '{print $1}')" != "$(md5sum "stack/$i" | awk '{print $1}')" ] ; then
    # -f makes curl fail (and set -e abort) on HTTP errors instead of
    # silently saving an error page as the image; -L matches the checksum
    # fetch above. (An earlier workaround used wget here because of bad
    # Content-Length responses from the centos artifacts server.)
    curl -f -L "$rdo_images_uri/$i" -o "stack/$i"
  fi
  # only untar the tar files
  if [ "${i##*.}" == "tar" ]; then tar -xf "stack/$i" -C stack/; fi
done
213
# Adding OpenStack packages to undercloud
pushd stack
cp undercloud.qcow2 instack.qcow2
LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2

# comma-separated package list for a single virt-customize --install call;
# no leading/trailing commas, or empty package names get passed to yum
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
PACKAGES+=",ceph-common"

# install the packages above and enable ceph to live on the controller
LIBGUESTFS_BACKEND=direct virt-customize --install "$PACKAGES" \
    --run-command "sed -i '/ControllerEnableCephStorage/c\\  ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
    -a instack.qcow2
popd
237
238
pushd stack

##########################################################
#####  Prep initial overcloud image with common deps #####
##########################################################

# work on a copy of the cached overcloud-full image
cp overcloud-full.qcow2 overcloud-full-opendaylight.qcow2

# the shipped puppet-aodh is stale; refresh it from the stable/liberty branch
rm -rf aodh
git clone -b stable/liberty https://github.com/openstack/puppet-aodh aodh
tar -czf puppet-aodh.tar.gz aodh
254
255 # Add epel, aodh and ceph, remove openstack-neutron-openvswitch
256 AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer"
257 AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier"
258 LIBGUESTFS_BACKEND=direct virt-customize \
259     --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \
260     --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \
261     --run-command "yum remove -y openstack-neutron-openvswitch" \
262     --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \
263     --run-command "if ! rpm -q epel-release > /dev/null; then yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm; fi" \
264     --install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \
265     --install "$AODH_PKG,ceph" \
266     -a overcloud-full-opendaylight.qcow2
267
###############################################
#####    Adding OpenDaylight to overcloud #####
###############################################

# yum repo definition for the CentOS NFV SIG OpenDaylight testing repo; the
# quoted 'EOF' delimiter keeps $releasever/$basearch literal for yum
cat > /tmp/opendaylight.repo << 'EOF'
[opendaylight]
name=OpenDaylight $releasever - $basearch
baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-4-testing/$basearch/os/
enabled=1
gpgcheck=0
EOF
279
odlrpm=opendaylight-4.0.0-1.rc3.1.el7.noarch.rpm
# rdo_images_uri is a file:// URL; [ -f ] and --upload need a plain
# filesystem path, so strip the scheme before testing
rdo_images_path=${rdo_images_uri#file://}
if [ -f "${rdo_images_path}/$odlrpm" ]; then
    # use the locally mirrored rpm; it must be uploaded into the image
    # (-a was missing here, which made virt-customize fail outright)
    LIBGUESTFS_BACKEND=direct virt-customize --upload "${rdo_images_path}/${odlrpm}:/tmp/" \
        -a overcloud-full-opendaylight.qcow2
    opendaylight=/tmp/$odlrpm
else
    # fall back to installing the package by name from the ODL yum repo
    opendaylight=opendaylight
fi
287
288 # install ODL packages
289 LIBGUESTFS_BACKEND=direct virt-customize \
290     --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
291     --install ${opendaylight},python-networking-odl \
292     -a overcloud-full-opendaylight.qcow2
293
## WORK AROUND
## when OpenDaylight lands in upstream RDO manager this can be removed

# upload the opendaylight puppet module, preferring a pre-built tarball from
# the stable-images mirror over cloning from github
rm -rf puppet-opendaylight
# rdo_images_uri is a file:// URL; [ -f ] and cp need a plain filesystem path
rdo_images_path=${rdo_images_uri#file://}
if [ -f "${rdo_images_path}/puppet-opendaylight-3.2.2.tar.gz" ]; then
    cp "${rdo_images_path}/puppet-opendaylight-3.2.2.tar.gz" puppet-opendaylight.tar.gz
else
    git clone -b opnfv_integration https://github.com/dfarrell07/puppet-opendaylight
    pushd puppet-opendaylight
    git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
    popd
fi
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
                                         --upload ../opendaylight-puppet-neutron.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
                                         -a overcloud-full-opendaylight.qcow2
312
313 # Patch in OpenDaylight installation and configuration
314 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \
315                                          --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \
316                                          -a instack.qcow2
317
318 # REMOVE ME AFTER Brahmaputra
319 LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-neutron-force-metadata.patch:/tmp \
320                                          --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/puppet-neutron-force-metadata.patch" \
321                                          -a overcloud-full-opendaylight.qcow2
322
323 LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \
324                                          --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \
325                                          -a overcloud-full-opendaylight.qcow2
326
327 LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-puppet-tripleo.patch:/tmp \
328                                          --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/aodh-puppet-tripleo.patch" \
329                                          -a overcloud-full-opendaylight.qcow2
330
331 # adds tripleoclient aodh workaround
332 # for keystone
333 LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \
334                                          --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \
335                                          --upload ../aodh-os-cloud-config.patch:/tmp \
336                                          --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \
337                                          -a instack.qcow2
338 # END REMOVE ME AFTER Brahmaputra
339
################################################
#####    Adding SFC+OpenDaylight overcloud #####
################################################

# work around for XFS grow bug
# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
# One-shot systemd unit that remounts / with inode64 shortly after boot; the
# quoted 'EOF' delimiter keeps the unit file completely literal.
cat > /tmp/xfs-grow-remount-fix.service << 'EOF'
[Unit]
Description=XFS Grow Bug Remount
After=network.target
Before=getty@tty1.service

[Service]
Type=oneshot
ExecStart=/bin/bash -c "echo 'XFS Grow Bug Remount Sleeping 180s' && sleep 180 && echo 'XFS Grow Bug Remounting Now' && mount -o remount,inode64 /"
RemainAfterExit=no

[Install]
WantedBy=multi-user.target
EOF
360
361
362 #copy opendaylight overcloud full to isolate odl-sfc
363 cp overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc.qcow2
364
365 LIBGUESTFS_BACKEND=direct virt-customize \
366     --upload "/tmp/xfs-grow-remount-fix.service:/etc/systemd/system/xfs-grow-remount-fix.service" \
367     --run-command "chmod 664 /etc/systemd/system/xfs-grow-remount-fix.service" \
368     --run-command "systemctl enable xfs-grow-remount-fix.service" \
369     --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \
370     --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \
371     --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \
372     --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \
373     --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
374     --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
375     -a overcloud-full-opendaylight-sfc.qcow2
376
377
378
###############################################
#####    Adding ONOS to overcloud #####
###############################################

## WORK AROUND
## when ONOS lands in upstream OPNFV artifacts this can be removed

# fetch the onos puppet module and bundle it into the overcloud image
rm -rf puppet-onos
git clone https://github.com/bobzhouHW/puppet-onos.git
# download jdk, onos and maven dependency packages into the module's files/
pushd puppet-onos/files
for artifact in jdk-8u51-linux-x64.tar.gz onos-1.3.0.tar.gz repository.tar; do
  curl ${onos_artifacts_uri}/${artifact} -o ./${artifact}
done
popd
mv puppet-onos onos
tar -czf puppet-onos.tar.gz onos
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-opendaylight.qcow2

## END WORK AROUND

popd
406
# move and sanitize private keys from instack.json file
mv stack/instackenv.json instackenv-virt.json
sed -i -e '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' \
       -e '/ssh-key/c\  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
411
412 # clean up the VMs
413 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
414 set -e
415 virsh destroy instack 2> /dev/null || echo -n ''
416 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
417 for i in \$(seq 0 $vm_index); do
418   virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
419   virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''
420 done
421 EOI
422