switching to RDO build undercloud instead of building it ourselves 29/4029/1
authorDan Radez <dradez@redhat.com>
Wed, 2 Dec 2015 22:03:20 +0000 (17:03 -0500)
committerDan Radez <dradez@redhat.com>
Mon, 7 Dec 2015 23:33:09 +0000 (18:33 -0500)
Change-Id: I9f09c195352d2d88a3da4f42e09424d96d39722b

build/Makefile
build/cache.mk
build/instack.sh
build/opnfv-apex.spec
ci/clean.sh
ci/deploy.sh

index f503e1f..8079932 100644 (file)
@@ -99,7 +99,7 @@ rpm-clean:
 .PHONY: rpm
 rpm:
        pushd ../ && git archive --format=tar --prefix=opnfv-apex-$(RPMVERS)/ HEAD > build/opnfv-apex.tar
-       tar -u --xform="s:instack.qcow2:opnfv-apex-$(RPMVERS)/build/instack.qcow2:" --file=opnfv-apex.tar instack.qcow2
+       tar -u --xform="s:stack/instack.qcow2:opnfv-apex-$(RPMVERS)/build/instack.qcow2:" --file=opnfv-apex.tar stack/instack.qcow2
        tar -u --xform="s:instack.xml:opnfv-apex-$(RPMVERS)/build/instack.xml:" --file=opnfv-apex.tar instack.xml
        tar -u --xform="s:baremetalbrbm_brbm1_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_0.xml
        tar -u --xform="s:baremetalbrbm_brbm1_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_1.xml
@@ -111,14 +111,7 @@ rpm:
        tar -u --xform="s:default-pool.xml:opnfv-apex-$(RPMVERS)/build/default-pool.xml:" --file=opnfv-apex.tar default-pool.xml
        tar -u --xform="s:instackenv-virt.json:opnfv-apex-$(RPMVERS)/build/instackenv-virt.json:" --file=opnfv-apex.tar instackenv-virt.json
        tar -u --xform="s:instackenv.json.example:opnfv-apex-$(RPMVERS)/build/instackenv.json.example:" --file=opnfv-apex.tar instackenv.json.example
-       tar -u --xform="s:stack/deploy-ramdisk-ironic.initramfs:opnfv-apex-$(RPMVERS)/build/stack/deploy-ramdisk-ironic.initramfs:" --file=opnfv-apex.tar stack/deploy-ramdisk-ironic.initramfs
-       tar -u --xform="s:stack/deploy-ramdisk-ironic.kernel:opnfv-apex-$(RPMVERS)/build/stack/deploy-ramdisk-ironic.kernel:" --file=opnfv-apex.tar stack/deploy-ramdisk-ironic.kernel
-       tar -u --xform="s:stack/ironic-python-agent.initramfs:opnfv-apex-$(RPMVERS)/build/stack/ironic-python-agent.initramfs:" --file=opnfv-apex.tar stack/ironic-python-agent.initramfs
-       tar -u --xform="s:stack/ironic-python-agent.kernel:opnfv-apex-$(RPMVERS)/build/stack/ironic-python-agent.kernel:" --file=opnfv-apex.tar stack/ironic-python-agent.kernel
-       tar -u --xform="s:stack/ironic-python-agent.vmlinuz:opnfv-apex-$(RPMVERS)/build/stack/ironic-python-agent.vmlinuz:" --file=opnfv-apex.tar stack/ironic-python-agent.vmlinuz
-       tar -u --xform="s:stack/overcloud-full.initrd:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.initrd:" --file=opnfv-apex.tar stack/overcloud-full.initrd
        tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2
-       tar -u --xform="s:stack/overcloud-full.vmlinuz:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.vmlinuz:" --file=opnfv-apex.tar stack/overcloud-full.vmlinuz
        tar -u --xform="s:network-environment.yaml:opnfv-apex-$(RPMVERS)/build/network-environment.yaml:" --file=opnfv-apex.tar network-environment.yaml
        tar -u --xform="s:opendaylight.yaml:opnfv-apex-$(RPMVERS)/build/opendaylight.yaml:" --file=opnfv-apex.tar opendaylight.yaml
        tar -u --xform="s:opendaylight.patch:opnfv-apex-$(RPMVERS)/build/opendaylight.patch:" --file=opnfv-apex.tar opendaylight.patch
@@ -126,9 +119,7 @@ rpm:
        rpmbuild -ba opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')"
 
 .PHONY: instack
-instack: instack.qcow2
-
-instack.qcow2:
+instack:
        @./instack.sh $(USE_MASTER)
 
 .PHONY: instack-clean
@@ -140,10 +131,9 @@ instack-clean:
        rm -f baremetalbrbm_brbm1_3.xml
        rm -f baremetalbrbm_brbm1_4.xml
        rm -f instack.xml
-       rm -f instack.qcow2
 
 .PHONY: iso
-iso:   build-clean instack.qcow2 rpm $(ISOCACHE)
+iso:   build-clean instack rpm $(ISOCACHE)
        @mkdir centos release
        cd centos && bsdtar -xf ../$(shell basename $(ISOSRC))
        # modify the installer iso's contents
index b1e9790..acec36a 100644 (file)
@@ -16,9 +16,8 @@ CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
 # BEGIN of variables to customize
 #
 CACHEFILES += .versions
-CACHEFILES += stack/deploy-ramdisk-ironic.tar
-CACHEFILES += stack/ironic-python-agent.tar
 CACHEFILES += stack/overcloud-full.tar
+CACHEFILES += stack/undercloud.qcow2
 CACHEFILES += $(shell basename $(ISOSRC))
 #
 # END of variables to customize
index f23df67..182d236 100755 (executable)
@@ -40,14 +40,6 @@ fi
 ssh -T ${SSH_OPTIONS[@]} stack@localhost "rm -f instack*.qcow2"
 
 # Yum repo setup for building the undercloud
-if ! rpm -q epel-release > /dev/null; then
-    sudo yum install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-fi
-
-if ! rpm -q rdo-release > /dev/null; then
-    sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
-fi
-
 if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
     sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
     sudo rm -rf /etc/yum.repos.d/delorean.repo
@@ -117,6 +109,7 @@ done
 # yum repo, triple-o package and ssh key setup for the undercloud
 ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
 set -e
+
 if ! rpm -q epel-release > /dev/null; then
     yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
 fi
@@ -124,26 +117,20 @@ fi
 yum -y install yum-plugin-priorities
 curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
 curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
-yum install -y python-tripleoclient
+
 cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
 chown stack:stack /home/stack/.ssh/authorized_keys
 EOI
 
-# install undercloud on Undercloud VM
-ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "openstack undercloud install"
-
 # copy instackenv file for future virt deployments
 if [ ! -d stack ]; then mkdir stack; fi
 scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
 
-# Clean cache to reduce the images size
-ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "yum clean all"
-
 # make a copy of instack VM's definitions, and disk image
 # it must be stopped to make a copy of its disk image
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
 set -e
-echo "Shutting down instack to take snapshot"
+echo "Shutting down instack to gather configs"
 virsh shutdown instack
 
 echo "Waiting for instack VM to shutdown"
@@ -180,28 +167,45 @@ scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
 
-# copy the instack disk image for inclusion in artifacts
-sudo cp /var/lib/libvirt/images/instack.qcow2 ./instack.qcow2
-
-#sudo chown $(whoami):$(whoami) ./instack.qcow2_
-#virt-sparsify --check-tmpdir=fail ./instack.qcow2_ ./instack.qcow2
-#rm -f ./instack.qcow2_
-
 # pull down the built images
 echo "Copying overcloud resources"
-IMAGES="deploy-ramdisk-ironic.tar"
-IMAGES+=" ironic-python-agent.tar"
-IMAGES+=" overcloud-full.tar"
-#IMAGES+="undercloud.qcow2"
+IMAGES="overcloud-full.tar"
+IMAGES+=" undercloud.qcow2"
 
 for i in $IMAGES; do
   # download prebuilt images from RDO Project
   if [ "$(curl -L $rdo_images_uri/${i}.md5 | awk {'print $1'})" != "$(md5sum stack/$i | awk {'print $1'})" ] ; then
-    curl https://ci.centos.org/artifacts/rdo/images/liberty/delorean/stable/$i -o stack/$i --verbose --silent --location
+    if [ $i == "undercloud.qcow2" ]; then
+      # there's a problem with the Content-Length reported by the centos artifacts
+      # server so using wget for it until a resolution is figured out.
+      wget -nv -O stack/$i $rdo_images_uri/$i
+    else
+      curl $rdo_images_uri/$i -o stack/$i --verbose --silent --location
+    fi
   fi
-  tar -xf stack/$i -C stack/
+  # only untar the tar files
+  if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
 done
 
+#Adding OpenStack packages to undercloud
+pushd stack
+cp undercloud.qcow2 instack.qcow2
+LIBGUESTFS_BACKEND=direct virt-customize --install yum-plugin-priorities -a instack.qcow2
+PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
+PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
+PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
+PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
+PACKAGES+=",python-troveclient,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
+PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
+PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
+PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
+PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
+PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server"
+
+LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES -a instack.qcow2
+popd
+
+
 #Adding OpenDaylight to overcloud
 pushd stack
 cp overcloud-full.qcow2 overcloud-full-odl.qcow2
@@ -220,7 +224,8 @@ git clone https://github.com/dfarrell07/puppet-opendaylight
 pushd puppet-opendaylight
 git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
 popd
-LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ --run-command "cd /etc/puppet/modules/; tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2
+LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
+                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2
 popd
 
 # move and Sanitize private keys from instack.json file
index 546a92a..c1e3504 100644 (file)
@@ -1,5 +1,5 @@
 Name:          opnfv-apex
-Version:       2.3
+Version:       2.4
 Release:       %{release}
 Summary:       RDO Manager disk images for deployment
 
@@ -47,14 +47,7 @@ install build/opendaylight.patch %{buildroot}%{_var}/opt/opnfv/
 
 install build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/
 install build/instackenv.json.example %{buildroot}%{_var}/opt/opnfv/
-install build/stack/deploy-ramdisk-ironic.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/deploy-ramdisk-ironic.kernel %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/ironic-python-agent.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/ironic-python-agent.kernel %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/ironic-python-agent.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/overcloud-full.initrd %{buildroot}%{_var}/opt/opnfv/stack/
 install build/stack/overcloud-full.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
-install build/stack/overcloud-full.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
 
 mkdir -p %{buildroot}%{_docdir}/opnfv/
 install LICENSE.rst %{buildroot}%{_docdir}/opnfv/
@@ -79,14 +72,7 @@ install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
 %{_var}/opt/opnfv/opendaylight.patch
 %{_var}/opt/opnfv/instackenv-virt.json
 %{_var}/opt/opnfv/instackenv.json.example
-%{_var}/opt/opnfv/stack/deploy-ramdisk-ironic.initramfs
-%{_var}/opt/opnfv/stack/deploy-ramdisk-ironic.kernel
-%{_var}/opt/opnfv/stack/ironic-python-agent.initramfs
-%{_var}/opt/opnfv/stack/ironic-python-agent.kernel
-%{_var}/opt/opnfv/stack/ironic-python-agent.vmlinuz
-%{_var}/opt/opnfv/stack/overcloud-full.initrd
 %{_var}/opt/opnfv/stack/overcloud-full.qcow2
-%{_var}/opt/opnfv/stack/overcloud-full.vmlinuz
 %doc %{_docdir}/opnfv/LICENSE.rst
 %doc %{_docdir}/opnfv/installation-instructions.rst
 %doc %{_docdir}/opnfv/installation-instructions.html
@@ -95,6 +81,8 @@ install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
 
 
 %changelog
+* Fri Dec 05 2015 Dan Radez <dradez@redhat.com> - 2.4-1
+- Removing glance images
 * Fri Nov 20 2015 Dan Radez <dradez@redhat.com> - 2.3-1
 - Adding documentation
 * Thu Nov 12 2015 Dan Radez <dradez@redhat.com> - 2.2-1
index 492b0dc..b8be179 100755 (executable)
@@ -28,4 +28,9 @@ virsh net-destroy brbm1 2> /dev/null
 virsh net-undefine brbm1 2> /dev/null
 ovs-vsctl del-br brbm1 2> /dev/null
 
+# clean pub keys from root's auth keys
+sed -i '/stack@instack.localdomain/d' /root/.ssh/authorized_keys
+sed -i '/virtual-power-key/d' /root/.ssh/authorized_keys
+
+
 echo "Cleanup Completed"
index 4f1c6b5..2483b84 100755 (executable)
@@ -295,9 +295,17 @@ function setup_instack_vm {
       #Upload instack image
       #virsh vol-create default --file instack.qcow2.xml
       virsh vol-create-as default instack.qcow2 30G --format qcow2
-      virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2
 
-      sleep 1 # this was to let the copy settle, needed with vol-upload?
      ### this doesn't work for some reason — I was getting hangup events, so using cp instead
+      #virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2
+      #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
+      #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
+      #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
+      #error: cannot close volume instack.qcow2
+      #error: internal error: received hangup / error event on socket
+      #error: Reconnected to the hypervisor
+
+      cp -f $RESOURCES/instack.qcow2 /var/lib/libvirt/images/instack.qcow2
 
   else
       echo "Found Instack VM, using existing VM"
@@ -306,7 +314,8 @@ function setup_instack_vm {
   # if the VM is not running update the authkeys and start it
   if ! virsh list | grep instack > /dev/null; then
     echo "Injecting ssh key to instack VM"
-    virt-customize -c qemu:///system -d instack --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
+    virt-customize -c qemu:///system -d instack --run-command "mkdir /root/.ssh/" \
+        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
         --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
         --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
         --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
@@ -337,9 +346,9 @@ function setup_instack_vm {
   while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
       echo -n "."
       sleep 3
-      CNT=CNT-1
+      CNT=$((CNT-1))
   done
-  if CNT == 0; then
+  if [ $CNT -eq 0 ]; then
       echo "Failed to contact Instack. Can Not Continue"
       exit 1
   fi
@@ -347,9 +356,9 @@ function setup_instack_vm {
   while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
       echo -n "."
       sleep 3
-      CNT=CNT-1
+      CNT=$((CNT-1))
   done
-  if CNT == 0; then
+  if [ $CNT -eq 0 ]; then
       echo "Failed to connect to Instack. Can Not Continue"
       exit 1
   fi
@@ -380,6 +389,7 @@ function setup_virtual_baremetal {
     fi
     virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
   done
+
 }
 
 ##Copy over the glance images and instack json file
@@ -388,14 +398,7 @@ function copy_materials_to_instack {
 
   echo
   echo "Copying configuration file and disk images to instack"
-  scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.initramfs "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/deploy-ramdisk-ironic.kernel "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.initramfs "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.kernel "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/ironic-python-agent.vmlinuz "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $NETENV "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD":
@@ -420,6 +423,12 @@ function copy_materials_to_instack {
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
 
   if [ "$virtual" == "TRUE" ]; then
+
+      # copy the instack vm's stack user's pub key to
+      # root's auth keys so that instack can control
+      # vm power on the hypervisor
+      ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
+
       # fix MACs to match new setup
       for i in $(seq 0 $vm_index); do
         pyscript="import json
@@ -462,6 +471,9 @@ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >>
 ##preping it for deployment and launch the deploy
 ##params: none
 function undercloud_prep_overcloud_deploy {
+# configure undercloud on Undercloud VM
+ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "openstack undercloud install > apex-undercloud-install.log"
+
   # check if HA is enabled
   if [ $ha_enabled == "TRUE" ]; then
      DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2"