updating the OpenDaylight patch
[apex.git] / build / instack.sh
index c19ab0f..a6e459f 100755 (executable)
@@ -2,11 +2,14 @@
 set -e
 declare -i CNT
 
-RDO_RELEASE=kilo
+rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/liberty/delorean/stable
+
+vm_index=4
+RDO_RELEASE=liberty
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
 
 # check for dependency packages
-for i in libguestfs-tools; do
+for i in libguestfs-tools python-docutils bsdtar; do
     if ! rpm -q $i > /dev/null; then
         sudo yum install -y $i
     fi
@@ -37,14 +40,6 @@ fi
 ssh -T ${SSH_OPTIONS[@]} stack@localhost "rm -f instack*.qcow2"
 
 # Yum repo setup for building the undercloud
-if ! rpm -q epel-release > /dev/null; then
-    sudo yum install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-fi
-
-if ! rpm -q rdo-release > /dev/null; then
-    sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
-fi
-
 if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
     sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
     sudo rm -rf /etc/yum.repos.d/delorean.repo
@@ -56,9 +51,17 @@ elif [ "$1" == "-master" ]; then
     sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
     sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
     sudo rm -f /etc/yum.repos.d/delorean-current.repo
-
 fi
 
+# install the OpenDaylight yum repo definition
+cat << 'EOF' | sudo tee /etc/yum.repos.d/opendaylight.repo
+[opendaylight]
+name=OpenDaylight $releasever - $basearch
+baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-3-candidate/$basearch/os/
+enabled=1
+gpgcheck=0
+EOF
+
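A quick way to confirm that the repo definition above was written with the yum variables left intact (the single-quoted 'EOF' delimiter keeps the shell from expanding $releasever and $basearch) is a check like the following; a sketch, not part of the build:

    grep -E '^(name|baseurl)=' /etc/yum.repos.d/opendaylight.repo
    # expected output keeps the literal yum placeholders:
    #   name=OpenDaylight $releasever - $basearch
    #   baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-3-candidate/$basearch/os/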
 # ensure the undercloud package is installed so we can build the undercloud
 if ! rpm -q instack-undercloud > /dev/null; then
     sudo yum install -y python-tripleoclient
@@ -74,17 +77,15 @@ if ! rpm -q libvirt-daemon-kvm > /dev/null; then
     sudo yum install -y libvirt-daemon-kvm
 fi
 
+# clean this up in case it's there
+sudo rm -f /tmp/instack.answers
+
 # ensure that no previous undercloud VMs are running
+sudo ../ci/clean.sh
 # and rebuild the bare undercloud VMs
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
 set -e
-virsh destroy instack 2> /dev/null || echo -n ''
-virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
-virsh destroy baremetalbrbm_0 2> /dev/null || echo -n ''
-virsh undefine baremetalbrbm_0 --remove-all-storage 2> /dev/null || echo -n ''
-virsh destroy baremetalbrbm_1 2> /dev/null || echo -n ''
-virsh undefine baremetalbrbm_1 --remove-all-storage 2> /dev/null || echo -n ''
-NODE_CPU=2 NODE_MEM=8192 instack-virt-setup
+NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1'" instack-virt-setup
 EOI
 
 # let dhcp happen so we can get the ip
@@ -108,6 +109,7 @@ done
 # yum repo, triple-o package and ssh key setup for the undercloud
 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
 set -e
+
 if ! rpm -q epel-release > /dev/null; then
     yum install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
 fi
@@ -115,17 +117,11 @@ fi
 yum -y install yum-plugin-priorities
 curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
 curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
-yum install -y python-tripleoclient
+
 cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
 chown stack:stack /home/stack/.ssh/authorized_keys
 EOI
 
-# install undercloud on Undercloud VM
-ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "openstack undercloud install"
-
-# Clean cache to reduce the images size
-ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "yum clean all"
-
 # copy instackenv file for future virt deployments
 if [ ! -d stack ]; then mkdir stack; fi
 scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
@@ -134,7 +130,7 @@ scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
 # it must be stopped to make a copy of its disk image
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
 set -e
-echo "Shutting down instack to take snapshot"
+echo "Shutting down instack to gather configs"
 virsh shutdown instack
 
 echo "Waiting for instack VM to shutdown"
@@ -150,38 +146,97 @@ if virsh list | grep instack > /dev/null; then
 fi
 
 echo $'\nGenerating libvirt configuration'
-virsh dumpxml baremetalbrbm_0 > baremetalbrbm_0.xml
-virsh dumpxml baremetalbrbm_1 > baremetalbrbm_1.xml
+for i in \$(seq 0 $vm_index); do
+  virsh dumpxml baremetalbrbm_brbm1_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_\$i.xml
+done
+
 virsh dumpxml instack > instack.xml
-#virsh vol-dumpxml instack.qcow2 --pool default > instack.qcow2.xml
 virsh net-dumpxml brbm > brbm-net.xml
+virsh net-dumpxml brbm1 > brbm1-net.xml
 virsh pool-dumpxml default > default-pool.xml
 EOI
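The awk one-liner in the dumpxml loop above is dense; it switches every second virtio NIC to the rtl8139 model. An equivalent standalone form for the first VM, expanded for readability (a sketch with the same effect as the inline version):

    virsh dumpxml baremetalbrbm_brbm1_0 | awk '
      /model type=.virtio./ {          # count NICs using the virtio model
        c++
        if (c == 2) {                  # on every second one ...
          sub(/virtio/, "rtl8139")     # ... emulate rtl8139 instead
          c = 0
        }
      }
      { print }                        # pass every line through
    ' > baremetalbrbm_brbm1_0.xml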
 
 # copy off the instack artifacts
 echo "Copying instack files to build directory"
-scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_0.xml .
-scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_1.xml .
+for i in $(seq 0 $vm_index); do
+  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_${i}.xml .
+done
+
 scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
+scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
 
-sudo cp /var/lib/libvirt/images/instack.qcow2 ./instack.qcow2
-#sudo chown $(whoami):$(whoami) ./instack.qcow2_
-#virt-sparsify --check-tmpdir=fail ./instack.qcow2_ ./instack.qcow2
-#rm -f ./instack.qcow2_
-
 # pull down the built images
 echo "Copying overcloud resources"
-IMAGES="deploy-ramdisk-ironic.initramfs deploy-ramdisk-ironic.kernel"
-IMAGES+=" ironic-python-agent.initramfs ironic-python-agent.kernel ironic-python-agent.vmlinuz"
-IMAGES+=" overcloud-full.initrd overcloud-full.qcow2  overcloud-full.vmlinuz"
+IMAGES="overcloud-full.tar"
+IMAGES+=" undercloud.qcow2"
 
 for i in $IMAGES; do
   # download prebuilt images from RDO Project
-  curl https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty/$i -z stack/$i -o stack/$i --verbose --silent --location
+  if [ "$(curl -L $rdo_images_uri/${i}.md5 | awk {'print $1'})" != "$(md5sum stack/$i | awk {'print $1'})" ] ; then
+    if [ $i == "undercloud.qcow2" ]; then
+      # there is a problem with the Content-Length reported by the CentOS artifacts
+      # server, so use wget for this file until a fix is in place.
+      wget -nv -O stack/$i $rdo_images_uri/$i
+    else
+      curl $rdo_images_uri/$i -o stack/$i --verbose --silent --location
+    fi
+  fi
+  # only untar the tar files
+  if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
 done
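For a single image, the cache check inside the loop above expands to roughly the following ($rdo_images_uri is defined at the top of the script; illustrative only):

    img=overcloud-full.tar
    remote_md5=$(curl -sL "$rdo_images_uri/${img}.md5" | awk '{print $1}')
    local_md5=$(md5sum "stack/$img" 2>/dev/null | awk '{print $1}')
    # re-download only when the local copy is missing or stale
    if [ "$remote_md5" != "$local_md5" ]; then
        curl -sL "$rdo_images_uri/$img" -o "stack/$img"
    fi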
 
+# Adding OpenStack packages to the undercloud image
+pushd stack
+cp undercloud.qcow2 instack.qcow2
+LIBGUESTFS_BACKEND=direct virt-customize --install yum-plugin-priorities -a instack.qcow2
+PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
+PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
+PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
+PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
+PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
+PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
+PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api,"
+PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector,"
+PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
+PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr"
+
+LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES -a instack.qcow2
+popd
+
+
+# Adding OpenDaylight to the overcloud image
+pushd stack
+# make a copy of the cached overcloud-full image
+cp overcloud-full.qcow2 overcloud-full-odl.qcow2
+
+# install the necessary packages
+LIBGUESTFS_BACKEND=direct virt-customize --upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
+    --install opendaylight,python-networking-odl -a overcloud-full-odl.qcow2
+
+## WORKAROUND
+## remove this once OpenDaylight support lands in upstream RDO Manager
+
+# upload the OpenDaylight puppet module
+rm -rf puppet-opendaylight
+git clone https://github.com/dfarrell07/puppet-opendaylight
+pushd puppet-opendaylight
+git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
+popd
+LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
+                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2
+
+# Patch in OpenDaylight installation and configuration
+LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-tripleo-heat-templates.patch:/tmp \
+                                         --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opendaylight-tripleo-heat-templates.patch" \
+                                         -a instack.qcow2
+LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron.patch:/tmp \
+                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
+                                         -a overcloud-full-odl.qcow2
+## END WORKAROUND
+popd
+
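The --prefix given to git archive above is what makes the tarball unpack as /etc/puppet/modules/opendaylight inside the image; the layout can be sanity-checked on the host from the stack/ directory before uploading (a sketch, not part of the build):

    tar -tzf puppet-opendaylight.tar.gz | head -n 3
    # every entry should sit under the opendaylight/ prefix, e.g.:
    #   opendaylight/
    #   opendaylight/manifests/
    #   opendaylight/manifests/init.pp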
 # move and sanitize the private keys in the instackenv.json file
 mv stack/instackenv.json instackenv-virt.json
 sed -i '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
@@ -192,9 +247,9 @@ ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
 set -e
 virsh destroy instack 2> /dev/null || echo -n ''
 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
-virsh destroy baremetalbrbm_0 2> /dev/null || echo -n ''
-virsh undefine baremetalbrbm_0 --remove-all-storage 2> /dev/null || echo -n ''
-virsh destroy baremetalbrbm_1 2> /dev/null || echo -n ''
-virsh undefine baremetalbrbm_1 --remove-all-storage 2> /dev/null || echo -n ''
+for i in \$(seq 0 $vm_index); do
+  virsh destroy baremetalbrbm_brbm1_\$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_\$i --remove-all-storage 2> /dev/null || echo -n ''
+done
 EOI