2 ##############################################################################
3 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv
# Locations of prebuilt/cached artifacts used throughout this build.
rdo_images_uri="file:///stable-images"            # source of prebuilt RDO images
rdo_images_cache="/stable-images"                 # local cache of those images
onos_artifacts_uri="file:///stable-images/onos"   # ONOS jdk/onos/maven tarballs
odl_artifacts_cache="/stable-images/odl"          # OpenDaylight extras (jolokia, rpms)

# Non-interactive SSH: never prompt for, and never record, host keys.
SSH_OPTIONS=(-o StrictHostKeyChecking=no
             -o GlobalKnownHostsFile=/dev/null
             -o UserKnownHostsFile=/dev/null)

# The four OPNFV network roles handled by the deploy/clean tooling.
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
24 # check for dependency packages
# Install any missing build-host prerequisites via yum; rpm -q exits non-zero
# when a package is absent.
# NOTE(review): the loop's closing 'fi'/'done' are not on the visible lines.
25 for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
26     if ! rpm -q $i > /dev/null; then
27         sudo yum install -y $i
31 # RDO Manager expects a stack user to exist, this checks for one
32 # and creates it if you are root
# Give the stack user passwordless sudo and drop the requiretty default so
# scripted sudo works over non-tty ssh sessions.
# NOTE(review): the user-creation step itself and the closing 'fi' are not on
# the visible lines.
33 if ! id stack > /dev/null; then
# NOTE(review): 'sudo echo ... | sudo tee' — the leading sudo on echo is
# redundant; only tee needs the privilege (the redirect happens via tee).
35 sudo echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
36 sudo echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
# 0440 is the mode sudo requires for drop-in files under /etc/sudoers.d
37 sudo chmod 0440 /etc/sudoers.d/stack
38 echo 'Added user stack'
41 # ensure that I can ssh as the stack user
# Append the invoking user's public key to stack's authorized_keys (creating
# ~stack/.ssh with the right owner/mode first if needed) so later steps can
# 'ssh stack@localhost' non-interactively.
# NOTE(review): the closing 'fi's for both conditionals are not on the
# visible lines.
42 if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then
43 if ! sudo ls -d /home/stack/.ssh/ ; then
44 sudo mkdir /home/stack/.ssh
45 sudo chown stack:stack /home/stack/.ssh
46 sudo chmod 700 /home/stack/.ssh
# NOTE(review): $USER expands in the outer shell (double quotes) before sudo
# runs, so the 'USER=$(whoami)' env prefix is effectively a no-op here.
48 USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys"
49 sudo chown stack:stack /home/stack/.ssh/authorized_keys
# Clean up instack disk images left over from a previous build by the stack
# user; fresh images are rebuilt below. The SSH_OPTIONS array expansion is
# quoted so each option stays a single word (shellcheck SC2068).
ssh -T "${SSH_OPTIONS[@]}" stack@localhost "rm -f instack*.qcow2"
55 # Yum repo setup for building the undercloud
# Default branch: pin to the Liberty current-passed-ci Delorean repos instead
# of the released RDO repos; with a '-master' first argument, use the master
# (centos7) current-passed-ci repo while deps still come from liberty.
# NOTE(review): RDO_RELEASE is referenced but not set in the visible lines —
# presumably defined earlier in the file; the closing 'fi' is also not shown.
56 if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
57 #pulling from current-passed-ci instead of release repos
58 #sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
59 sudo yum -y install yum-plugin-priorities
60 sudo yum-config-manager --disable openstack-${RDO_RELEASE}
61 sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
62 sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
63 sudo rm -f /etc/yum.repos.d/delorean-current.repo
64 elif [ "$1" == "-master" ]; then
65 sudo yum -y install yum-plugin-priorities
66 sudo yum-config-manager --disable openstack-${RDO_RELEASE}
67 sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
68 sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
69 sudo rm -f /etc/yum.repos.d/delorean-current.repo
72 # ensure the undercloud package is installed so we can build the undercloud
# NOTE(review): python-tripleoclient is installed to satisfy the
# instack-undercloud check — presumably it pulls it in as a dependency.
73 if ! rpm -q instack-undercloud > /dev/null; then
74 sudo yum install -y python-tripleoclient
77 # ensure openvswitch is installed
78 if ! rpm -q openvswitch > /dev/null; then
79 sudo yum install -y openvswitch
82 # ensure libvirt is installed
83 if ! rpm -q libvirt-daemon-kvm > /dev/null; then
84 sudo yum install -y libvirt-daemon-kvm
# NOTE(review): the closing 'fi' for each of the three checks above is not on
# the visible lines.
87 # clean this up in case it's there from an earlier run
88 sudo rm -f /tmp/instack.answers
90 # ensure that no previous undercloud VMs are running
92 # and rebuild the bare undercloud VMs
# Remote session as the stack user: create the instack undercloud VM plus 5
# baremetal test VMs (2 vCPU / 8 GB each) attached to the four brbm* bridges.
# NOTE(review): the heredoc's closing EOI is not on the visible lines.
93 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
95 NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
98 # let dhcp happen so we can get the ip
99 # just wait instead of checking until we see an address
100 # because there may be a previous lease that needs
104 # get the undercloud ip address
# First try the local dnsmasq lease file for the instack VM's address.
105 UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
106 if [ -z "$UNDERCLOUD" ]; then
107 #if not found then dnsmasq may be using leasefile-ro
# Fallback: read the VM's MAC from virsh and resolve it via the host's ARP
# table.
# NOTE(review): each bracket expression contains a literal backslash
# ([0-9a-f\]) — it still matches hex pairs, but was likely meant as [0-9a-f].
108 instack_mac=$(ssh -T ${SSH_OPTIONS[@]} stack@localhost "virsh domiflist instack" | grep default | \
109 grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
110 UNDERCLOUD=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
# Abort message when no address could be determined either way.
# NOTE(review): closing 'fi's are not on the visible lines; $blue/$reset are
# presumably color escapes defined elsewhere in the file.
112 if [ -z "$UNDERCLOUD" ]; then
113 echo "\n\nNever got IP for Instack. Can Not Continue."
116 echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
119 echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
122 # ensure that we can ssh to the undercloud
# Poll until root ssh to the undercloud answers, bounded by a CNT countdown.
# NOTE(review): CNT's initialisation and the loop body/'done' are not on the
# visible lines.
124 while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null && [ $CNT -gt 0 ]; do
131 # yum repo, triple-o package and ssh key setup for the undercloud
# Remote root session on the undercloud VM: add EPEL and the Delorean liberty
# repos, and let the stack user reuse root's authorized key.
# NOTE(review): the heredoc's closing EOI and the 'fi' for the epel check are
# not on the visible lines.
132 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
135 if ! rpm -q epel-release > /dev/null; then
136 yum install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
139 yum -y install yum-plugin-priorities
140 curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
141 curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
143 cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
144 chown stack:stack /home/stack/.ssh/authorized_keys
# Copy the generated instackenv.json off the undercloud for future virt
# deployments, creating the local stack/ directory first if needed. The
# array and variable expansions are quoted so the ssh options and the remote
# path survive word-splitting (shellcheck SC2068/SC2086).
if [ ! -d stack ]; then mkdir stack; fi
scp "${SSH_OPTIONS[@]}" "stack@$UNDERCLOUD:instackenv.json" stack/instackenv.json
151 # make a copy of instack VM's definitions, and disk image
152 # it must be stopped to make a copy of its disk image
# Remote stack@localhost session: shut instack down, wait for it to stop
# (CNT countdown), then dump libvirt XML for the instack VM, the baremetal
# VMs (swapping the second virtio NIC model for rtl8139 via awk), the four
# brbm* networks and the default storage pool, for later reuse.
# NOTE(review): the heredoc's closing EOI, the while/for/if closers, CNT's
# initialisation and vm_index's definition are not on the visible lines.
153 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
155 echo "Shutting down instack to gather configs"
156 virsh shutdown instack
158 echo "Waiting for instack VM to shutdown"
160 while virsh list | grep instack > /dev/null && [ $CNT -gt 0 ]; do
165 if virsh list | grep instack > /dev/null; then
166 echo "instack failed to shutdown for copy"
170 echo $'\nGenerating libvirt configuration'
171 for i in \$(seq 0 $vm_index); do
172 virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
175 virsh dumpxml instack > instack.xml
176 virsh net-dumpxml brbm > brbm-net.xml
177 virsh net-dumpxml brbm1 > brbm1-net.xml
178 virsh net-dumpxml brbm2> brbm2-net.xml
179 virsh net-dumpxml brbm3 > brbm3-net.xml
180 virsh pool-dumpxml default > default-pool.xml
183 # copy off the instack artifacts
# Pull the just-dumped XML definitions from the stack user's home into the
# build directory.
# NOTE(review): the for loop's 'done' is not on the visible lines.
184 echo "Copying instack files to build directory"
185 for i in $(seq 0 $vm_index); do
186 scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
189 scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
190 scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
191 scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
192 scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
193 scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
194 scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
196 # pull down the built images
197 echo "Copying overcloud resources"
198 IMAGES="overcloud-full.tar"
199 IMAGES+=" undercloud.qcow2"
# For each image: (re)download from $rdo_images_uri when it is missing or its
# md5sum no longer matches the upstream .md5 file, then unpack tarballs into
# stack/.
# NOTE(review): the 'for i in $IMAGES' opener and the if/for closers are not
# on the visible lines.
202 # download prebuilt images from RDO Project
203 if [ ! -f stack/$i ] || [ "$(curl -L $rdo_images_uri/${i}.md5 | awk {'print $1'})" != "$(md5sum stack/$i | awk {'print $1'})" ] ; then
204 #if [ $i == "undercloud.qcow2" ]; then
205 ### there's a problem with the Content-Length reported by the centos artifacts
206 ### server so using wget for it until a resolution is figured out.
207 #wget -nv -O stack/$i $rdo_images_uri/$i
209 curl $rdo_images_uri/$i -o stack/$i
212 # only untar the tar files
213 if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
216 # Adding OpenStack packages to undercloud
# Start from the cached undercloud image and bake yum-priorities into it.
# LIBGUESTFS_BACKEND=direct makes libguestfs launch qemu directly rather
# than going through libvirtd.
218 cp undercloud.qcow2 instack.qcow2
219 LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2
# Packages to bake into the undercloud image; passed as one comma-separated
# list to "virt-customize --install" below. The previous version left a
# trailing comma on two of the += lines, producing ",," (an empty package
# name) in the final list — those trailing commas are removed here so every
# separator joins exactly two package names.
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
PACKAGES+=",ceph-common"
232 # install the packages above and enabling ceph to live on the controller
233 # OpenWSMan package update supports the AMT Ironic driver for the TealBox
# Single virt-customize pass over the undercloud image: install $PACKAGES,
# flip the tripleo heat/puppet templates to enable Ceph on the controller,
# pull in an updated OpenWSMan repo/package, and extend Ironic's
# enabled_drivers list (adding pxe_amt among others).
# NOTE(review): this command ends with a line continuation; the rest of it
# (including its '-a <image>' target) is not on the visible lines.
234 LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \
235 --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
236 --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
237 --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
238 --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
239 --run-command "yum update -y openwsman*" \
240 --run-command "sed -i '/pxe_wol/c\\ enabled_drivers => ['pxe_ipmitool', 'pxe_ssh', 'pxe_drac', 'pxe_ilo', 'pxe_wol', 'pxe_amt'],' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp" \
247 ##########################################################
248 ##### Prep initial overcloud image with common deps #####
249 ##########################################################
251 # make a copy of the cached overcloud-full image
252 cp overcloud-full.qcow2 overcloud-full-opendaylight.qcow2
253 # Update puppet-aodh, the one shipped in the image is outdated
# Fetch the stable/liberty puppet-aodh module from git and tar it up (with a
# top-level aodh/ directory) for upload into the image below.
# NOTE(review): the pushd/popd around the checkout are not on the visible
# lines.
255 git clone https://github.com/openstack/puppet-aodh aodh
257 git checkout stable/liberty
259 tar -czf puppet-aodh.tar.gz aodh
261 # Add epel, aodh and ceph, remove openstack-neutron-openvswitch
# Aodh packages to add to the overcloud image, comma-separated for
# virt-customize --install.
262 AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer"
263 AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier"
# Customize the overcloud image: refresh the puppet-aodh module, remove the
# distro neutron-openvswitch agent, preload the SCTP conntrack kernel module
# at boot, ensure EPEL is present, and install a BGPVPN neutron plugin rpm
# plus the aodh services and ceph.
264 LIBGUESTFS_BACKEND=direct virt-customize \
265 --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \
266 --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \
267 --run-command "yum remove -y openstack-neutron-openvswitch" \
268 --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \
269 --run-command "if ! rpm -q epel-release > /dev/null; then yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm; fi" \
270 --install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \
271 --install "$AODH_PKG,ceph" \
272 -a overcloud-full-opendaylight.qcow2
274 ###############################################
275 ##### Adding OpenDaylight to overcloud #####
276 ###############################################
# Yum repo definition pointing at the CentOS NFV SIG OpenDaylight 4.0 release
# builds; uploaded into the image further below. \$releasever/\$basearch are
# escaped so yum, not this script, expands them.
# NOTE(review): the repo's [section] header line and the heredoc's closing
# EOF are not on the visible lines.
278 cat > /tmp/opendaylight.repo << EOF
280 name=OpenDaylight \$releasever - \$basearch
281 baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-40-release/\$basearch/os/
# Prefer a locally cached ODL rpm when present; otherwise fall back to the
# 'opendaylight' package name resolved from the repo file above.
# NOTE(review): the 'else'/'fi' of this conditional are not on the visible
# lines.
286 odlrpm=opendaylight-4.0.0-1.el7.noarch.rpm
287 if [ -f ${rdo_images_cache}/$odlrpm ]; then
# NOTE(review): this virt-customize shows no '-a <image>' target on the
# visible line — verify the image argument was not lost.
288 LIBGUESTFS_BACKEND=direct virt-customize --upload ${rdo_images_cache}/$odlrpm:/tmp/
289 opendaylight=/tmp/$odlrpm
291 opendaylight=opendaylight
294 # install ODL packages
# Upload the repo file and install OpenDaylight (local rpm path or repo
# package, per the selection above) plus the networking-odl Neutron driver.
295 LIBGUESTFS_BACKEND=direct virt-customize \
296 --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
297 --install ${opendaylight},python-networking-odl \
298 -a overcloud-full-opendaylight.qcow2
300 # install Jolokia for ODL HA
# Unpack the cached jolokia bundle into ODL's OSGi system directory (used for
# ODL HA, per the note above).
301 LIBGUESTFS_BACKEND=direct virt-customize \
302 --upload ${odl_artifacts_cache}/jolokia.tar.gz:/tmp/ \
303 --run-command "tar -xvf /tmp/jolokia.tar.gz -C /opt/opendaylight/system/org" \
304 -a overcloud-full-opendaylight.qcow2
307 ## when OpenDaylight lands in upstream RDO manager this can be removed
309 # upload the opendaylight puppet module
310 rm -rf puppet-opendaylight
311 # TMP FIX to see if this works
# Clone a fork branch carrying an ODL HA-proxy fix and archive it with an
# opendaylight/ prefix so it unpacks straight into /etc/puppet/modules.
312 git clone -b odl_ha_proxy_fix https://github.com/trozet/puppet-opendaylight
313 pushd puppet-opendaylight
314 git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
317 # grab latest puppet-neutron module
318 rm -rf puppet-neutron
# Same treatment for stable/liberty puppet-neutron, prefixed neutron/.
# NOTE(review): the pushd into puppet-neutron and both matching popd's are
# not on the visible lines.
319 git clone -b stable/liberty https://github.com/openstack/puppet-neutron.git
321 git archive --format=tar.gz --prefix=neutron/ HEAD > ../puppet-neutron.tar.gz
# Replace the image's puppet-opendaylight and puppet-neutron modules with the
# freshly archived copies built above.
324 LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
325 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
326 --run-command "rm -rf /etc/puppet/modules/neutron" \
327 --upload puppet-neutron.tar.gz:/etc/puppet/modules/ \
328 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-neutron.tar.gz" \
329 -a overcloud-full-opendaylight.qcow2
331 # Patch in OpenDaylight installation and configuration
# Apply the OPNFV patch to the tripleo heat templates inside the image.
# NOTE(review): this first command ends with a continuation; its
# '-a <image>' target is not on the visible lines.
332 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \
333 --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \
336 # Patch in OPNFV custom puppet-tripleO
337 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-puppet-tripleo.patch:/tmp \
338 --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/opnfv-puppet-tripleo.patch" \
339 -a overcloud-full-opendaylight.qcow2
341 # REMOVE ME AFTER Brahmaputra
342 LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \
343 --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \
344 -a overcloud-full-opendaylight.qcow2
346 LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-puppet-tripleo.patch:/tmp \
347 --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/aodh-puppet-tripleo.patch" \
348 -a overcloud-full-opendaylight.qcow2
350 # adds tripleoclient aodh workaround
352 LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \
353 --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \
354 --upload ../aodh-os-cloud-config.patch:/tmp \
355 --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \
357 # END REMOVE ME AFTER Brahmaputra
359 ################################################
360 ##### Adding SFC+OpenDaylight overcloud #####
361 ################################################
363 # work around for XFS grow bug
364 # http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
# systemd unit that remounts / with inode64 shortly after boot, working
# around the XFS growfs ENOSPC issue linked above.
# NOTE(review): the unit's [Unit]/[Service]/[Install] section headers and the
# heredoc's closing EOF are not on the visible lines.
365 cat > /tmp/xfs-grow-remount-fix.service << EOF
367 Description=XFS Grow Bug Remount
369 Before=getty@tty1.service
373 ExecStart=/bin/bash -c "echo 'XFS Grow Bug Remount Sleeping 180s' && sleep 180 && echo 'XFS Grow Bug Remounting Now' && mount -o remount,inode64 /"
377 WantedBy=multi-user.target
381 #copy opendaylight overcloud full to isolate odl-sfc
382 cp overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc.qcow2
# SFC variant: install the remount unit above, a 3.13 kernel-ml build and
# matching rebuilt openvswitch kmod/userspace, set the new kernel as the
# grub default, and point the in-tree openvswitch.ko path at the new kmod.
# NOTE(review): the grub2-set-default pipeline uses 'cut -d' with no field
# selector (-f) — that invocation looks broken; verify on a test image.
384 LIBGUESTFS_BACKEND=direct virt-customize \
385 --upload "/tmp/xfs-grow-remount-fix.service:/etc/systemd/system/xfs-grow-remount-fix.service" \
386 --run-command "chmod 664 /etc/systemd/system/xfs-grow-remount-fix.service" \
387 --run-command "systemctl enable xfs-grow-remount-fix.service" \
388 --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \
389 --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \
390 --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \
391 --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \
392 --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
393 --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
394 -a overcloud-full-opendaylight-sfc.qcow2
398 ###############################################
399 ##### Adding ONOS to overcloud #####
400 ###############################################
403 ## when ONOS lands in upstream OPNFV artifacts this can be removed
405 # upload the onos puppet module
408 git clone https://github.com/bobzhouHW/puppet-onos.git
410 # download jdk, onos and maven dependency packages.
# The puppet-onos payload (jdk, onos, maven repository) is fetched from the
# local artifacts mirror rather than the public internet.
# NOTE(review): the pushd/mv/popd plumbing between these steps is not on the
# visible lines; also confirm the target image below — puppet-onos is
# uploaded into overcloud-full-opendaylight.qcow2, not a separate ONOS image.
412 curl ${onos_artifacts_uri}/jdk-8u51-linux-x64.tar.gz -o ./jdk-8u51-linux-x64.tar.gz
413 curl ${onos_artifacts_uri}/onos-1.3.0.tar.gz -o ./onos-1.3.0.tar.gz
414 curl ${onos_artifacts_uri}/repository.tar -o ./repository.tar
418 tar -czf puppet-onos.tar.gz onos
419 LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
420 --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-opendaylight.qcow2
426 # move and sanitize private keys from the instackenv.json file
# Replace the stack user's real credential material in the captured
# instackenv with a placeholder before shipping it as instackenv-virt.json
# (sed 'c\' swaps out the whole matching line).
427 mv stack/instackenv.json instackenv-virt.json
428 sed -i '/pm_password/c\ "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
429 sed -i '/ssh-key/c\ "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
# Final remote cleanup as the stack user: destroy and undefine the instack VM
# and every baremetal test VM, removing their storage; errors are swallowed
# so already-absent domains don't abort the session.
# NOTE(review): the heredoc's closing EOI and the loop's 'done' are not on
# the visible lines.
432 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
434 virsh destroy instack 2> /dev/null || echo -n ''
435 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
436 for i in \$(seq 0 $vm_index); do
437 virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
438 virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''