Merge "Restrict RTD Verify and Merge jobs to Releng"
authorTrevor Bramwell <tbramwell@linuxfoundation.org>
Wed, 27 Jun 2018 18:18:34 +0000 (18:18 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 27 Jun 2018 18:18:34 +0000 (18:18 +0000)
.gitignore
jjb/3rd_party_ci/download-netvirt-artifact.sh
jjb/3rd_party_ci/install-netvirt.sh
jjb/3rd_party_ci/odl-netvirt.yaml
jjb/apex/apex-snapshot-deploy.sh
jjb/doctor/doctor.yaml
jjb/functest/functest-env-presetup.sh
jjb/releng/releng-release-tagging.sh
utils/build-server-ansible/main.yml
utils/fetch_os_creds.sh

index 7790d46..9ee8c53 100644 (file)
@@ -1,5 +1,6 @@
 *~
 .*.sw?
+*.swp
 /docs_build/
 /docs_output/
 /releng/
index 5a50e8a..b1f977a 100755 (executable)
@@ -3,20 +3,21 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
-
 echo "Attempting to fetch the artifact location from ODL Jenkins"
 if [ "$ODL_BRANCH" != 'master' ]; then
   DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
   ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
 else
-  DIST='flourine'
+  DIST='fluorine'
 fi
+
+echo "ODL Distribution is ${DIST}"
+ODL_ZIP="karaf-SNAPSHOT.zip"
 CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
 # due to limitation with the Jenkins Gerrit Trigger, we need to use Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
-DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
-NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
+ODL_BUILD_JOB_NUM=$(curl --fail ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/vex-yul-odl-jenkins-1/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
 
 echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
 
@@ -30,8 +31,9 @@ fi
 
 #TODO(trozet) remove this once odl-pipeline accepts zip files
 echo "Converting artifact zip to tar.gz"
-unzip $ODL_ZIP
-tar czf /tmp/${NETVIRT_ARTIFACT} $(echo $ODL_ZIP | sed -n 's/\.zip//p')
+UNZIPPED_DIR=`dirname $(unzip -qql ${ODL_ZIP} | head -n1 | tr -s ' ' | cut -d' ' -f5-)`
+unzip ${ODL_ZIP}
+tar czf /tmp/${NETVIRT_ARTIFACT} ${UNZIPPED_DIR}
 
 echo "Download complete"
 ls -al /tmp/${NETVIRT_ARTIFACT}
index ed1a12b..07bbe77 100755 (executable)
@@ -26,8 +26,7 @@ fi
 # but we really should check the cache here, and not use a single cache folder
 # for when we support multiple jobs on a single slave
 pushd sdnvpn/odl-pipeline/lib > /dev/null
-# FIXME (trozet) remove this once permissions are fixed in sdnvpn repo
-chmod +x odl_reinstaller.sh
+git fetch https://gerrit.opnfv.org/gerrit/sdnvpn refs/changes/17/59017/5 && git checkout FETCH_HEAD
 ./odl_reinstaller.sh --pod-config ${SNAP_CACHE}/node.yaml \
   --odl-artifact /tmp/${NETVIRT_ARTIFACT} --ssh-key-file ${SNAP_CACHE}/id_rsa
 popd > /dev/null
index c077fce..3a46e79 100644 (file)
               predefined-parameters: |
                 DEPLOY_SCENARIO=os-odl-nofeature-ha
                 FUNCTEST_MODE=testcase
-                FUNCTEST_SUITE_NAME=odl_netvirt
+                FUNCTEST_SUITE_NAME=tempest_smoke_serial
                 RC_FILE_PATH=$HOME/cloner-info/overcloudrc
               node-parameters: true
               kill-phase-on: FAILURE
index 1810a00..0760626 100644 (file)
@@ -133,6 +133,10 @@ for node_def in ${virsh_vm_defs}; do
   sudo virsh define ${node_def}
   node=$(echo ${node_def} | awk -F '.' '{print $1}')
   sudo cp -f ${node}.qcow2 /var/lib/libvirt/images/
+  # FIXME (trozet) install java on each disk image as required to upgrade ODL
+  # should be added to Apex as part of the deployment. Remove this after that
+  # is complete
+  sudo LIBGUESTFS_BACKEND=direct virt-customize --install java-1.8.0-openjdk -a /var/lib/libvirt/images/${node}.qcow2
   sudo virsh start ${node}
   echo "Node: ${node} started"
 done
index 1c12563..0d7b781 100644 (file)
         arch: 'aarch64'
       - installer: 'daisy'
         arch: 'aarch64'
+      # disabling the following tests due to limitation of PoD owners
+      # these would be enabled again once the PoDs are ready
+      - installer: 'fuel'
+        arch: 'x86_64'
+      - installer: 'daisy'
+        arch: 'x86_64'
 
     jobs:
       - 'doctor-verify-{inspector}-{stream}'
index 323b325..81718a5 100755 (executable)
@@ -5,27 +5,31 @@ set -o pipefail
 
 # Fetch INSTALLER_IP for APEX deployments
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
-    echo "Gathering IP information for Apex installer VM"
-    ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-    if sudo virsh list | grep undercloud; then
-        echo "Installer VM detected"
-        undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+    if [ -n "$RC_FILE_PATH" ]; then
+        echo "RC_FILE_PATH is set: ${RC_FILE_PATH}...skipping detecting UC IP"
+    else
+        echo "Gathering IP information for Apex installer VM"
+        ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+        if sudo virsh list | grep undercloud; then
+            echo "Installer VM detected"
+            undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                       grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-        export INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-        export sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
-        sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
-        export stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
-
-        if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-            sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-        fi
-        if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-          sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+            export INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+            export sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+            sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+            export stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
+
+            if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+                sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+            fi
+            if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+                sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+            fi
+            echo "Installer ip is ${INSTALLER_IP}"
+        else
+            echo "No available installer VM exists and no credentials provided...exiting"
+            exit 1
         fi
-        echo "Installer ip is ${INSTALLER_IP}"
-    else
-        echo "No available installer VM exists and no credentials provided...exiting"
-        exit 1
     fi
 
 elif [[ ${INSTALLER_TYPE} == 'daisy' ]]; then
index 10c0cc8..e1b9351 100644 (file)
@@ -57,7 +57,7 @@ for release_file in $RELEASE_FILES; do
               echo "--> Creating $tag tag for $repo at $ref"
               git tag -am "$tag" $tag $ref
               echo "--> Pushing tag"
-              echo "[noop] git push origin $tag"
+              git push origin $tag
           else
               # For non-merge jobs just output the ref info.
               git show -s --format="%h %s %d" $ref
index 0fcce71..c9f244b 100644 (file)
       pip:
         name: tox
         state: present
+    - name: install yamllint
+      pip:
+        name: yamllint
+        state: present
     - include: vars/docker-compose-CentOS.yml
       when: ansible_distribution == "CentOS"
     - include: vars/docker-compose-Ubuntu.yml
index b40b75b..0e041c6 100755 (executable)
@@ -149,29 +149,33 @@ if [ "$installer_type" == "fuel" ]; then
     echo $auth_url >> $dest_path
 
 elif [ "$installer_type" == "apex" ]; then
-    if ! ipcalc -c $installer_ip; then
-      installer_ip=$(sudo virsh domifaddr undercloud | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
-      if [ -z "$installer_ip" ] || ! $(ipcalc -c $installer_ip); then
-        echo "Unable to find valid IP for Apex undercloud: ${installer_ip}"
-        exit 1
-      fi
-    fi
-    verify_connectivity $installer_ip
+    if [ -n "$RC_FILE_PATH" ]; then
+        echo "RC_FILE_PATH is set: ${RC_FILE_PATH}. Copying RC FILE to ${dest_path}"
+        sudo cp -f ${RC_FILE_PATH} ${dest_path}
+    else
+        if ! ipcalc -c $installer_ip; then
+            installer_ip=$(sudo virsh domifaddr undercloud | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+            if [ -z "$installer_ip" ] || ! $(ipcalc -c $installer_ip); then
+                echo "Unable to find valid IP for Apex undercloud: ${installer_ip}"
+                exit 1
+            fi
+        fi
+        verify_connectivity $installer_ip
 
-    # The credentials file is located in the Instack VM (192.0.2.1)
-    # NOTE: This might change for bare metal deployments
-    info "... from Instack VM $installer_ip..."
-    if [ -f /root/.ssh/id_rsa ]; then
-        chmod 600 /root/.ssh/id_rsa
-    fi
+        # The credentials file is located in the Instack VM (192.0.2.1)
+        # NOTE: This might change for bare metal deployments
+        info "... from Instack VM $installer_ip..."
+        if [ -f /root/.ssh/id_rsa ]; then
+            chmod 600 /root/.ssh/id_rsa
+        fi
 
-    if [ "${BRANCH}" == "stable/fraser" ]; then
-      rc_file=overcloudrc.v3
-    else
-      rc_file=overcloudrc
+        if [ "${BRANCH}" == "stable/fraser" ]; then
+            rc_file=overcloudrc.v3
+        else
+            rc_file=overcloudrc
+        fi
+        sudo scp $ssh_options root@$installer_ip:/home/stack/${rc_file} $dest_path
     fi
-    sudo scp $ssh_options root@$installer_ip:/home/stack/${rc_file} $dest_path
-
 elif [ "$installer_type" == "compass" ]; then
     if [ "${BRANCH}" == "stable/danube" ]; then
         verify_connectivity $installer_ip