2 ##############################################################################
3 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
# Image source locations: default to a local stable-images mirror; the
# commented-out URI is the public RDO images repo alternative.
#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv
rdo_images_uri=file:///stable-images
onos_artifacts_uri=file:///stable-images/onos
# Non-interactive SSH: never prompt about host keys and never record them
# (the VMs are rebuilt constantly, so stale known_hosts entries would break ssh)
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
# Logical OPNFV network names.
# NOTE(review): not referenced in the visible portion of this script --
# presumably consumed by elided or sourced code; verify before removing.
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
# check for dependency packages needed to build images on this host and
# install any that are missing (loop/if terminators continue past this excerpt)
for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
if ! rpm -q $i > /dev/null; then
sudo yum install -y $i
# RDO Manager expects a stack user to exist, this checks for one
# and creates it if you are root
# (user creation body continues past this excerpt)
if ! id stack > /dev/null; then
# Grant the stack user passwordless sudo and exempt it from requiretty, as
# required by tooling that runs sudo from non-interactive sessions.
# NOTE: the original prefixed these echo commands with 'sudo', which is a
# no-op -- only tee (which writes the privileged file) needs root.
echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
# sudoers.d fragments must be mode 0440 or sudo refuses to load them
sudo chmod 0440 /etc/sudoers.d/stack
echo 'Added user stack'
# ensure that I can ssh as the stack user
# (adds the invoking user's public key to stack's authorized_keys when absent)
if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then
# create stack's ~/.ssh with the ownership/permissions sshd requires
if ! sudo ls -d /home/stack/.ssh/ ; then
sudo mkdir /home/stack/.ssh
sudo chown stack:stack /home/stack/.ssh
sudo chmod 700 /home/stack/.ssh
# NOTE(review): the USER=$(whoami) prefix cannot influence the ~$USER text,
# which the *current* shell expands before sudo runs -- this only works when
# $USER is already set in this shell; verify.
USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys"
sudo chown stack:stack /home/stack/.ssh/authorized_keys
# clean up the stack user's previously built instack disk images
# (quote the array expansion -- ShellCheck SC2068; elements are word-split
# identically here but quoting is the safe idiom)
ssh -T "${SSH_OPTIONS[@]}" stack@localhost "rm -f instack*.qcow2"
# Yum repo setup for building the undercloud.
# Default branch (no -master flag): liberty current-passed-ci delorean repos.
# RDO_RELEASE is defined in elided/earlier code.
if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
#pulling from current-passed-ci instead of release repos
#sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
sudo yum -y install yum-plugin-priorities
sudo yum-config-manager --disable openstack-${RDO_RELEASE}
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
sudo rm -f /etc/yum.repos.d/delorean-current.repo
# -master flag: track RDO trunk (master) instead of liberty
elif [ "$1" == "-master" ]; then
sudo yum -y install yum-plugin-priorities
sudo yum-config-manager --disable openstack-${RDO_RELEASE}
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
# NOTE(review): deps still come from the centos7-liberty tree on the master
# branch -- confirm this is intentional
sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
sudo rm -f /etc/yum.repos.d/delorean-current.repo
70 # install the opendaylight yum repo definition
71 cat << 'EOF' | sudo tee /etc/yum.repos.d/opendaylight.repo
73 name=OpenDaylight $releasever - $basearch
74 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-33-release/$basearch/os/
# ensure the undercloud package is installed so we can build the undercloud
# (python-tripleoclient is the package that provides/pulls it in)
if ! rpm -q instack-undercloud > /dev/null; then
sudo yum install -y python-tripleoclient
# ensure openvswitch is installed
if ! rpm -q openvswitch > /dev/null; then
sudo yum install -y openvswitch
# ensure libvirt is installed
if ! rpm -q libvirt-daemon-kvm > /dev/null; then
sudo yum install -y libvirt-daemon-kvm
94 # clean this up incase it's there
95 sudo rm -f /tmp/instack.answers
97 # ensure that no previous undercloud VMs are running
99 # and rebuild the bare undercloud VMs
100 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
102 NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
# let dhcp happen so we can get the ip
# just wait instead of checking until we see an address
# because there may be a previous lease that needs
# get the undercloud ip address from the libvirt default-network lease file
UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
if [ -z "$UNDERCLOUD" ]; then
#if not found then dnsmasq may be using leasefile-ro
# fall back: read the instack VM's MAC from virsh and map it to an IP via
# the host ARP table
instack_mac=$(ssh -T ${SSH_OPTIONS[@]} stack@localhost "virsh domiflist instack" | grep default | \
grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
UNDERCLOUD=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
if [ -z "$UNDERCLOUD" ]; then
# -e is required so \n renders as blank lines; without it the literal
# characters "\n\n" were printed before the message
echo -e "\n\nNever got IP for Instack. Can Not Continue."
echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
# (both discovery branches report the address; $blue/$reset colour vars are
# presumably defined in elided code -- verify)
echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
# ensure that we can ssh to the undercloud
# retry until the undercloud answers or the (elided) CNT countdown expires
while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null && [ $CNT -gt 0 ]; do
138 # yum repo, triple-o package and ssh key setup for the undercloud
139 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
142 if ! rpm -q epel-release > /dev/null; then
# -y is required: this runs over a non-interactive ssh session, so yum's
# interactive confirmation prompt would abort the install
yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
146 yum -y install yum-plugin-priorities
147 curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
148 curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
150 cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
151 chown stack:stack /home/stack/.ssh/authorized_keys
# copy instackenv file for future virt deployments
# mkdir -p is idempotent and race-free (replaces the 'if [ ! -d ... ]' test)
mkdir -p stack
scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
158 # make a copy of instack VM's definitions, and disk image
159 # it must be stopped to make a copy of its disk image
160 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
162 echo "Shutting down instack to gather configs"
163 virsh shutdown instack
165 echo "Waiting for instack VM to shutdown"
167 while virsh list | grep instack > /dev/null && [ $CNT -gt 0 ]; do
172 if virsh list | grep instack > /dev/null; then
173 echo "instack failed to shutdown for copy"
177 echo $'\nGenerating libvirt configuration'
178 for i in \$(seq 0 $vm_index); do
179 virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
182 virsh dumpxml instack > instack.xml
183 virsh net-dumpxml brbm > brbm-net.xml
184 virsh net-dumpxml brbm1 > brbm1-net.xml
185 virsh net-dumpxml brbm2> brbm2-net.xml
186 virsh net-dumpxml brbm3 > brbm3-net.xml
187 virsh pool-dumpxml default > default-pool.xml
# copy off the instack artifacts
echo "Copying instack files to build directory"
# one node-definition XML per virtual baremetal node; vm_index comes from
# elided code
for i in $(seq 0 $vm_index); do
scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
# pull down the built images
echo "Copying overcloud resources"
IMAGES="overcloud-full.tar"
IMAGES+=" undercloud.qcow2"
# download prebuilt images from RDO Project
# re-download only when the upstream md5 differs from the local copy
# (the loop over $IMAGES that binds $i is in elided lines)
if [ "$(curl -L $rdo_images_uri/${i}.md5 | awk {'print $1'})" != "$(md5sum stack/$i | awk {'print $1'})" ] ; then
#if [ $i == "undercloud.qcow2" ]; then
### there's a problem with the Content-Length reported by the centos artifacts
### server so using wget for it until a resolution is figured out.
#wget -nv -O stack/$i $rdo_images_uri/$i
curl $rdo_images_uri/$i -o stack/$i
# only untar the tar files
if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
223 #Adding OpenStack packages to undercloud
225 cp undercloud.qcow2 instack.qcow2
226 LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2
# Comma-separated package list consumed by virt-customize --install below.
# FIX: two fragments both ended and began with ',' which produced empty
# entries (',,') in the list; the duplicate commas are removed.
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
PACKAGES+=",ceph-common"
239 # install the packages above and enabling ceph to live on the controller
240 LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \
241 --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
242 --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
243 --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
250 ##########################################################
251 ##### Prep initial overcloud image with common deps #####
252 ##########################################################
254 # make a copy of the cached overcloud-full image
255 cp overcloud-full.qcow2 overcloud-full-opendaylight.qcow2
256 # Update puppet-aodh it's old
258 git clone https://github.com/openstack/puppet-aodh aodh
260 git checkout stable/liberty
262 tar -czf puppet-aodh.tar.gz aodh
264 # work around for XFS grow bug
265 # http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
266 cat > /tmp/xfs-grow-remount-fix.service << EOF
268 Description=XFS Grow Bug Remount
270 Before=getty@tty1.service
274 ExecStart=/bin/bash -c "mount -o remount,inode64 /"
278 WantedBy=multi-user.target
281 # Add epel, aodh and ceph, remove openstack-neutron-openvswitch
282 AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer"
283 AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier"
284 LIBGUESTFS_BACKEND=direct virt-customize --upload "/tmp/xfs-grow-remount-fix.service:/usr/lib/systemd/system/xfs-grow-remount-fix.service" \
285 --run-command "systemctl enable xfs-grow-remount-fix.service" \
286 --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \
287 --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \
288 --run-command "yum remove -y openstack-neutron-openvswitch" \
289 --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \
290 --install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
291 --install "$AODH_PKG,ceph" \
292 -a overcloud-full-opendaylight.qcow2
294 ###############################################
295 ##### Adding OpenDaylight to overcloud #####
296 ###############################################
298 cat > /tmp/opendaylight.repo << EOF
300 name=OpenDaylight \$releasever - \$basearch
301 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-33-release/\$basearch/os/
306 # install ODL packages
307 LIBGUESTFS_BACKEND=direct virt-customize \
308 --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
309 --install opendaylight,python-networking-odl \
310 -a overcloud-full-opendaylight.qcow2
313 ## when OpenDaylight lands in upstream RDO manager this can be removed
315 # upload the opendaylight puppet module
316 rm -rf puppet-opendaylight
317 git clone -b 2.2.0 https://github.com/dfarrell07/puppet-opendaylight
318 pushd puppet-opendaylight
319 git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
321 LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
322 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
323 --upload ../opendaylight-puppet-neutron.patch:/tmp \
324 --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
325 -a overcloud-full-opendaylight.qcow2
327 # Patch in OpenDaylight installation and configuration
328 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \
329 --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \
332 # REMOVE ME AFTER Brahmaputra
333 LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-neutron-force-metadata.patch:/tmp \
334 --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/puppet-neutron-force-metadata.patch" \
335 -a overcloud-full-opendaylight.qcow2
336 LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \
337 --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \
338 -a overcloud-full-opendaylight.qcow2
340 # adds tripleoclient aodh workaround
342 LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \
343 --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \
344 --upload ../aodh-os-cloud-config.patch:/tmp \
345 --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \
347 # END REMOVE ME AFTER Brahmaputra
349 ################################################
350 ##### Adding SFC+OpenDaylight overcloud #####
351 ################################################
353 cat > /tmp/opendaylight.repo << EOF
355 name=OpenDaylight \$releasever - \$basearch
356 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-4-testing/\$basearch/os/
361 #copy opendaylight overcloud full to isolate odl-sfc
362 cp overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc.qcow2
364 # upload the opendaylight puppet module
365 rm -rf puppet-opendaylight
366 git clone -b 3.0.0 https://github.com/dfarrell07/puppet-opendaylight
367 pushd puppet-opendaylight
368 git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
371 LIBGUESTFS_BACKEND=direct virt-customize \
372 --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \
373 --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \
374 --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \
375 --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \
376 --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
377 --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
378 --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
379 --run-command "yum update -y opendaylight" \
380 --run-command "rm -rf /etc/puppet/modules/opendaylight && rm -f /etc/puppet/modules/puppet-opendaylight.tar.gz " \
381 --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
382 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
383 -a overcloud-full-opendaylight-sfc.qcow2
387 ###############################################
388 ##### Adding ONOS to overcloud #####
389 ###############################################
392 ## when ONOS lands in upstream OPNFV artifacts this can be removed
394 # upload the onos puppet module
397 git clone https://github.com/bobzhouHW/puppet-onos.git
399 # download jdk, onos and maven dependancy packages.
401 curl ${onos_artifacts_uri}/jdk-8u51-linux-x64.tar.gz -o ./jdk-8u51-linux-x64.tar.gz
402 curl ${onos_artifacts_uri}/onos-1.3.0.tar.gz -o ./onos-1.3.0.tar.gz
403 curl ${onos_artifacts_uri}/repository.tar -o ./repository.tar
407 tar -czf puppet-onos.tar.gz onos
408 LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
409 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-opendaylight.qcow2
# Move the instackenv file into place and sanitize it: replace the private
# key material with a placeholder before the file leaves this host.
mv stack/instackenv.json instackenv-virt.json
# one in-place sed pass, one expression per sensitive field
sed -i -e '/pm_password/c\ "pm_password": "INSERT_STACK_USER_PRIV_KEY",' \
       -e '/ssh-key/c\ "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
421 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
423 virsh destroy instack 2> /dev/null || echo -n ''
424 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
425 for i in \$(seq 0 $vm_index); do
426 virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
427 virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''