Merge "Add lint to releng repo"
authorJose Lausuch <jose.lausuch@ericsson.com>
Mon, 13 Feb 2017 19:11:10 +0000 (19:11 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Mon, 13 Feb 2017 19:11:10 +0000 (19:11 +0000)
24 files changed:
docs/jenkins-job-builder/opnfv-jjb-usage.rst
jjb/3rd_party_ci/download-netvirt-artifact.sh
jjb/3rd_party_ci/install-netvirt.sh
jjb/3rd_party_ci/odl-netvirt.yml
jjb/apex/apex-snapshot-deploy.sh
jjb/apex/apex.yml
jjb/copper/copper.yml
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/functest/functest-cleanup.sh
jjb/functest/set-functest-env.sh
jjb/global/slave-params.yml
jjb/releng/opnfv-docker-arm.yml [new file with mode: 0644]
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yml
jjb/releng/testapi-automate.yml
jjb/releng/testapi-backup-mongodb.sh
jjb/releng/testapi-docker-deploy.sh
utils/test/testapi/htmlize/htmlize.py
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_models.py
utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py

index 73b31b2..fc968f8 100644 (file)
@@ -39,11 +39,24 @@ Job Types
 
   * Trigger: **remerge**
 
+* Experimental Job
+
+  * Trigger: **check-experimental**
+
 The verify and merge jobs are retriggerable in Gerrit by simply leaving
 a comment with one of the keywords listed above.
 This is useful in case you need to re-run one of those jobs in case
 if build issues or something changed with the environment.
 
+The experimental jobs are not triggered automatically. You need to leave
+a comment with the keyword listed above to trigger them manually. They are
+useful for trying out experimental features.
+
+Note that experimental jobs `skip vote`_ for the verified status, which means
+they will reset the verified status to 0. If you want to keep the verified
+status, use **recheck-experimental** in the commit message to trigger both
+verify and experimental jobs.
+
 You can add below persons as reviewers to your patch in order to get it
 reviewed and submitted.
 
@@ -67,3 +80,5 @@ in `releng-jobs.yaml`_.
 
 .. _releng-jobs.yaml:
     https://gerrit.opnfv.org/gerrit/gitweb?p=releng.git;a=blob;f=jjb/releng-jobs.yaml;
+.. _skip vote:
+    https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
\ No newline at end of file
index fe8066c..6aea01d 100755 (executable)
@@ -3,20 +3,28 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
+
 echo "Attempting to fetch the artifact location from ODL Jenkins"
 CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
 # due to limitation with the Jenkins Gerrit Trigger, we need to use Gerrit REST API to get the change details
-ODL_JOB_URL=$(curl -s $CHANGE_DETAILS_URL | grep netvirt-patch-test-current-carbon | tail -1 | \
-    sed 's/\\n//g' | awk '{print $6}')
-NETVIRT_ARTIFACT_URL="${ODL_JOB_URL}org.opendaylight.integration\$distribution-karaf/artifact/org.opendaylight.integration/distribution-karaf/0.6.0-SNAPSHOT/distribution-karaf-0.6.0-SNAPSHOT.tar.gz"
+ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-carbon/[0-9]+' | tail -1 | grep -Eo [0-9]+)
+
+NETVIRT_ARTIFACT_URL="https://jenkins.opendaylight.org/releng/job/netvirt-distribution-check-carbon/${ODL_BUILD_JOB_NUM}/artifact/${ODL_ZIP}"
 echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
 
 echo "Downloading the artifact. This could take time..."
-wget -q -O $NETVIRT_ARTIFACT $NETVIRT_ARTIFACT_URL
+wget -q -O $ODL_ZIP $NETVIRT_ARTIFACT_URL
 if [[ $? -ne 0 ]]; then
     echo "The artifact does not exist! Probably removed due to ODL Jenkins artifact retention policy."
     echo "Rerun netvirt-patch-test-current-carbon to get artifact rebuilt."
     exit 1
 fi
+
+#TODO(trozet) remove this once odl-pipeline accepts zip files
+echo "Converting artifact zip to tar.gz"
+unzip $ODL_ZIP
+tar czf /tmp/${NETVIRT_ARTIFACT} $(echo $ODL_ZIP | sed -n 's/\.zip//p')
+
 echo "Download complete"
-ls -al $NETVIRT_ARTIFACT
+ls -al /tmp/${NETVIRT_ARTIFACT}
index ce2a50c..ed1a12b 100755 (executable)
@@ -7,8 +7,18 @@ SNAP_CACHE=$HOME/snap_cache
 # clone opnfv sdnvpn repo
 git clone https://gerrit.opnfv.org/gerrit/p/sdnvpn.git $WORKSPACE/sdnvpn
 
-if [ ! -f "$NETVIRT_ARTIFACT" ]; then
-  echo "ERROR: ${NETVIRT_ARTIFACT} specified as NetVirt Artifact, but file does not exist"
+if [ ! -f "/tmp/${NETVIRT_ARTIFACT}" ]; then
+  echo "ERROR: /tmp/${NETVIRT_ARTIFACT} specified as NetVirt Artifact, but file does not exist"
+  exit 1
+fi
+
+if [ ! -f "${SNAP_CACHE}/node.yaml" ]; then
+  echo "ERROR: node.yaml pod config missing in ${SNAP_CACHE}"
+  exit 1
+fi
+
+if [ ! -f "${SNAP_CACHE}/id_rsa" ]; then
+  echo "ERROR: id_rsa ssh creds missing in ${SNAP_CACHE}"
   exit 1
 fi
 
@@ -16,6 +26,8 @@ fi
 # but we really should check the cache here, and not use a single cache folder
 # for when we support multiple jobs on a single slave
 pushd sdnvpn/odl-pipeline/lib > /dev/null
+# FIXME (trozet) remove this once permissions are fixed in sdnvpn repo
+chmod +x odl_reinstaller.sh
 ./odl_reinstaller.sh --pod-config ${SNAP_CACHE}/node.yaml \
-  --odl-artifact ${NETVIRT_ARTIFACT} --ssh-key-file ${SNAP_CACHE}/id_rsa
+  --odl-artifact /tmp/${NETVIRT_ARTIFACT} --ssh-key-file ${SNAP_CACHE}/id_rsa
 popd > /dev/null
index f3a4c02..dcc6ac1 100644 (file)
@@ -62,8 +62,8 @@
             branch: '{branch}'
         - string:
             name: NETVIRT_ARTIFACT
-            default: $WORKSPACE/distribution-karaf.tar.gz
-        - 'odl-netvirt-virtual-defaults'
+            default: distribution-karaf.tar.gz
+        - 'odl-netvirt-virtual-intel-defaults'
 
     triggers:
         - gerrit:
                     GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
                     NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
                     APEX_ENV_NUMBER=$APEX_ENV_NUMBER
-                  node-parameters: false
+                  node-parameters: true
                   kill-phase-on: FAILURE
                   abort-all-job: true
         - multijob:
                   predefined-parameters: |
                     DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
                     FUNCTEST_SUITE_NAME=healthcheck
+                    RC_FILE_PATH=$HOME/cloner-info/overcloudrc
                   node-parameters: true
                   kill-phase-on: FAILURE
                   abort-all-job: false
index a99955f..05a2d48 100644 (file)
@@ -41,23 +41,26 @@ fi
 local_snap_checksum=""
 
 # check snap cache directory exists
+# if snapshot cache exists, find the checksum
 if [ -d "$SNAP_CACHE" ]; then
-  latest_snap=$(ls -Art | grep tar.gz | tail -n 1)
+  latest_snap=$(ls ${SNAP_CACHE} | grep tar.gz | tail -n 1)
   if [ -n "$latest_snap" ]; then
-    local_snap_checksum=$(sha512sum ${latest_snap} | cut -d' ' -f1)
+    local_snap_checksum=$(sha512sum ${SNAP_CACHE}/${latest_snap} | cut -d' ' -f1)
   fi
 else
   mkdir -p ${SNAP_CACHE}
 fi
 
 # compare check sum and download latest snap if not up to date
-if [ "$local_snap_checksum" -ne "$latest_snap_checksum" ]; then
+if [ "$local_snap_checksum" != "$latest_snap_checksum" ]; then
   snap_url=$(cat opnfv.properties | grep OPNFV_SNAP_URL | awk -F "=" '{print $2}')
   if [ -z "$snap_url" ]; then
     echo "ERROR: Snap URL from snapshot.properties is null!"
     exit 1
   fi
   echo "INFO: SHA mismatch, will download latest snapshot"
+  # wipe cache
+  rm -rf ${SNAP_CACHE}/*
   wget --directory-prefix=${SNAP_CACHE}/ ${snap_url}
   snap_tar=$(basename ${snap_url})
 else
@@ -66,9 +69,8 @@ fi
 
 echo "INFO: Snapshot to be used is ${snap_tar}"
 
-# create tmp directory and unpack snap
-mkdir -p ./tmp
-pushd ./tmp > /dev/null
+# move to snap cache dir and unpack
+pushd ${SNAP_CACHE} > /dev/null
 tar xvf ${snap_tar}
 
 # create each network
@@ -86,22 +88,22 @@ for network_def in ${virsh_networks}; do
     sudo virsh net-start ${network}
   fi
   echo "Checking if OVS bridge is missing for network: ${network}"
-  if ! ovs-vsctl show | grep "br-${network}"; then
-    ovs-vsctl add-br br-${network}
+  if ! sudo ovs-vsctl show | grep "br-${network}"; then
+    sudo ovs-vsctl add-br br-${network}
     echo "OVS Bridge created: br-${network}"
     if [ "br-${network}" == 'br-admin' ]; then
       echo "Configuring IP 192.0.2.99 on br-admin"
       sudo ip addr add  192.0.2.99/24 dev br-admin
       sudo ip link set up dev br-admin
     elif [ "br-${network}" == 'br-external' ]; then
-      echo "Configuring IP 192.168.37.99 on br-external"
-      sudo ip addr add  192.168.37.99/24 dev br-external
+      echo "Configuring IP 192.168.37.1 on br-external"
+      sudo ip addr add  192.168.37.1/24 dev br-external
       sudo ip link set up dev br-external
     fi
   fi
 done
 
-echo "Virsh networks up: $(virsh net-list)"
+echo "Virsh networks up: $(sudo virsh net-list)"
 echo "Bringing up Overcloud VMs..."
 virsh_vm_defs=$(ls baremetal*.xml)
 
@@ -134,9 +136,9 @@ netvirt_url="http://${admin_controller_ip}:8081/restconf/operational/network-top
 source overcloudrc
 counter=1
 while [ "$counter" -le 10 ]; do
-  if curl --fail ${admin_controller_ip}:80; then
+  if curl --fail --silent ${admin_controller_ip}:80 > /dev/null; then
     echo "Overcloud Horizon is up...Checking if OpenDaylight NetVirt is up..."
-    if curl --fail ${netvirt_url} > /dev/null; then
+    if curl --fail --silent -u admin:admin ${netvirt_url} > /dev/null; then
       echo "OpenDaylight is up.  Overcloud deployment complete"
       exit 0
     else
index e3f0f53..ff9fbec 100644 (file)
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl-gluon-noha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl-gluon-noha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl-gluon-noha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
 # Colorado Build
 - job-template:
     name: 'apex-build-colorado'
index eff66ba..ea1af47 100644 (file)
 
     builders:
         - shell: |
-            echo "Nothing to verify!"
+            #!/bin/bash
+            set -o errexit
+            set -o nounset
+            set -o pipefail
+
+            cd $WORKSPACE/ci
+            shellcheck -f tty tests/*.sh
index f712adb..1567409 100644 (file)
     name: 'daisy-build-daily-macro'
     builders:
         - shell:
-            !include-raw-escape: ./daisy4nfv-basic.sh
+            !include-raw: ./daisy4nfv-basic.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-build.sh
+            !include-raw: ./daisy4nfv-build.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-upload-artifact.sh
+            !include-raw: ./daisy4nfv-upload-artifact.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-workspace-cleanup.sh
+            !include-raw: ./daisy4nfv-workspace-cleanup.sh
 
 - builder:
     name: 'daisy-deploy-daily-macro'
     builders:
         - shell:
-            !include-raw-escape: ./daisy4nfv-download-artifact.sh
+            !include-raw: ./daisy4nfv-download-artifact.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-deploy.sh
+            !include-raw: ./daisy4nfv-deploy.sh
 
 - builder:
     name: 'daisy-test-daily-macro'
index febce6f..ee82c14 100644 (file)
     name: 'daisy-verify-build-macro'
     builders:
         - shell:
-            !include-raw-escape: ./daisy4nfv-basic.sh
+            !include-raw: ./daisy4nfv-basic.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-build.sh
+            !include-raw: ./daisy4nfv-build.sh
         - shell:
-            !include-raw-escape: ./daisy4nfv-workspace-cleanup.sh
+            !include-raw: ./daisy4nfv-workspace-cleanup.sh
 
 #####################################
 # parameter macros
index a2b5aa1..49901be 100644 (file)
         - string:
             name: CLEAN_DOCKER_IMAGES
             default: 'false'
-            description: 'Remove downloaded docker images (opnfv/functest:*)'
+            description: 'Remove downloaded docker images (opnfv/functest*:*)'
         - functest-parameter:
             gs-pathname: '{gs-pathname}'
 
index b03d477..fc277b9 100755 (executable)
@@ -3,14 +3,22 @@
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
 echo "Cleaning up docker containers/images..."
+HOST_ARCH=$(uname -m)
 FUNCTEST_IMAGE=opnfv/functest
-# Remove containers along with image opnfv/functest:<none>
+if [ "$HOST_ARCH" = "aarch64" ]; then
+    FUNCTEST_IMAGE="${FUNCTEST_IMAGE}_${HOST_ARCH}"
+fi
+
+# Remove containers along with image opnfv/functest*:<none>
 dangling_images=($(docker images -f "dangling=true" | grep $FUNCTEST_IMAGE | awk '{print $3}'))
 if [[ -n ${dangling_images} ]]; then
     echo "  Removing $FUNCTEST_IMAGE:<none> images and their containers..."
     for image_id in "${dangling_images[@]}"; do
         echo "      Removing image_id: $image_id and its containers"
-        docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+        containers=$(docker ps -a | grep $image_id | awk '{print $1}')
+        if [[ -n "$containers" ]];then
+            docker rm -f $containers >${redirect}
+        fi
         docker rmi $image_id >${redirect}
     done
 fi
index afd656f..abec480 100755 (executable)
@@ -14,8 +14,9 @@ if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
 fi
 
 if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then
+    echo "Credentials file detected: ${RC_FILE_PATH}"
     # volume if credentials file path is given to Functest
-    rc_file_vol="-v $RC_FILE_PATH:/home/opnfv/functest/conf/openstack.creds"
+    rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds"
 fi
 
 
@@ -70,17 +71,22 @@ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
 
 volumes="${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol}"
 
+HOST_ARCH=$(uname -m)
+FUNCTEST_IMAGE="opnfv/functest"
+if [ "$HOST_ARCH" = "aarch64" ]; then
+    FUNCTEST_IMAGE="${FUNCTEST_IMAGE}_${HOST_ARCH}"
+fi
 
-echo "Functest: Pulling image opnfv/functest:${DOCKER_TAG}"
-docker pull opnfv/functest:$DOCKER_TAG >/dev/null
+echo "Functest: Pulling image ${FUNCTEST_IMAGE}:${DOCKER_TAG}"
+docker pull ${FUNCTEST_IMAGE}:$DOCKER_TAG >/dev/null
 
 cmd="sudo docker run --privileged=true -id ${envs} ${volumes} \
      ${custom_params} ${TESTCASE_OPTIONS} \
-     opnfv/functest:${DOCKER_TAG} /bin/bash"
+     ${FUNCTEST_IMAGE}:${DOCKER_TAG} /bin/bash"
 echo "Functest: Running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
-container_id=$(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+container_id=$(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
 echo "Container ID=${container_id}"
 if [ -z ${container_id} ]; then
     echo "Cannot find opnfv/functest container ID ${container_id}. Please check if it is existing."
@@ -91,8 +97,8 @@ echo "Starting the container: docker start ${container_id}"
 docker start ${container_id}
 sleep 5
 docker ps >${redirect}
-if [ $(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | wc -l) == 0 ]; then
-    echo "The container opnfv/functest with ID=${container_id} has not been properly started. Exiting..."
+if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
+    echo "The container ${FUNCTEST_IMAGE} with ID=${container_id} has not been properly started. Exiting..."
     exit 1
 fi
 if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
index 0aeab4c..429828e 100644 (file)
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+    name: 'opnfv-build-ubuntu-arm-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'opnfv-build-ubuntu-arm'
+            description: 'Slave label on Jenkins'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: BUILD_DIRECTORY
+            default: $WORKSPACE/build_output
+            description: "Directory where the build artifact will be located upon the completion of the build."
 #####################################################
 # Parameters for none-CI PODs
 #####################################################
diff --git a/jjb/releng/opnfv-docker-arm.yml b/jjb/releng/opnfv-docker-arm.yml
new file mode 100644 (file)
index 0000000..09c9f33
--- /dev/null
@@ -0,0 +1,77 @@
+##############################################
+# job configuration for docker build and push
+##############################################
+
+- project:
+
+    name: opnfv-docker-arm
+
+    master: &master
+        stream: master
+        branch: '{stream}'
+        disabled: false
+    danube: &danube
+        stream: danube
+        branch: 'stable/{stream}'
+        disabled: true
+    functest-arm-receivers: &functest-arm-receivers
+        receivers: >
+            cristina.pauna@enea.com
+            alexandru.avadanii@enea.com
+    other-receivers: &other-receivers
+        receivers: ''
+
+    project:
+        # projects with jobs for master
+        - 'functest':
+            <<: *master
+            <<: *functest-arm-receivers
+        # projects with jobs for stable
+
+    jobs:
+        - '{project}-docker-build-arm-push-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+    name: '{project}-docker-build-arm-push-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters: &parameters
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-arm-defaults'
+        - string:
+            name: PUSH_IMAGE
+            default: "true"
+            description: "To enable/disable pushing the image to Dockerhub."
+        - string:
+            name: DOCKER_REPO_NAME
+            default: "opnfv/{project}_aarch64"
+            description: "Dockerhub repo to be pushed to."
+        - string:
+            name: RELEASE_VERSION
+            default: ""
+            description: "Release version, e.g. 1.0, 2.0, 3.0"
+        - string:
+            name: DOCKERFILE
+            default: "Dockerfile.aarch64"
+            description: "Dockerfile to use for creating the image."
+
+    scm:
+        - git-scm
+
+    builders: &builders
+        - shell:
+            !include-raw-escape: ./opnfv-docker.sh
+
+    triggers:
+        - pollscm:
+            cron: "*/30 * * * *"
+
+    publishers:
+        - email:
+            recipients: '{receivers}'
index 40669bc..c906e1f 100644 (file)
@@ -12,6 +12,7 @@ set -o nounset
 set -o pipefail
 
 
+
 echo "Starting opnfv-docker for $DOCKER_REPO_NAME ..."
 echo "--------------------------------------------------------"
 echo
@@ -51,10 +52,8 @@ if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
     done
 fi
 
-
-# cd to directory where Dockerfile is located
 cd $WORKSPACE/docker
-if [ ! -f ./Dockerfile ]; then
+if [ ! -f ${DOCKERFILE} ]; then
     echo "ERROR: Dockerfile not found."
     exit 1
 fi
@@ -78,7 +77,8 @@ fi
 echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
 echo "--------------------------------------------------------"
 echo
-cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH ."
+cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
+    -f $DOCKERFILE ."
 
 echo ${cmd}
 ${cmd}
index 70d38f2..90a91f8 100644 (file)
             name: RELEASE_VERSION
             default: ""
             description: "Release version, e.g. 1.0, 2.0, 3.0"
+        - string:
+            name: DOCKERFILE
+            default: "Dockerfile"
+            description: "Dockerfile to use for creating the image."
 
     scm:
         - git-scm
index 0b6c36e..dd76538 100644 (file)
 - job:
     name: 'testapi-mongodb-backup'
 
-    slave-label: 'testresults'
-
     parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'testresults'
+            description: 'Slave label on Jenkins'
         - project-parameter:
-            project: '{project}'
-            branch: '{branch}'
+            project: 'releng'
+            branch: 'master'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/releng
+            description: 'Git URL to use on this Jenkins Slave'
 
     scm:
         - git-scm
index 52957ab..8dba17b 100644 (file)
@@ -26,6 +26,6 @@ if [ $? != 0 ]; then
     exit 1
 else
     echo "Uploading mongodump to artifact $artifact_dir"
-    /usr/local/bin/gsutil cp -r "$workspace"/"$file_name" gs://testingrohit/"$artifact_dir"/
+    /usr/local/bin/gsutil cp -r "$workspace"/"$file_name" gs://artifacts.opnfv.org/"$artifact_dir"/
     echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir"
 fi
index 04d71f7..b4e60b0 100644 (file)
@@ -4,7 +4,7 @@ function check() {
 
     # Verify hosted
     sleep 5
-    cmd=`curl -s --head  --request GET http://testresults.opnfv.org/auto/swagger/spec | grep '200 OK' > /dev/null`
+    cmd=`curl -s --head  --request GET http://testresults.opnfv.org/test/swagger/spec | grep '200 OK' > /dev/null`
     rc=$?
     echo $rc
 
@@ -63,7 +63,7 @@ else
 fi
 
 echo "Running a container with the new image"
-sudo docker run -dti -p "8711:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/auto" opnfv/testapi:latest
+sudo docker run -dti -p "8082:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/test" opnfv/testapi:latest
 
 if check; then
     echo "TestResults Hosted."
@@ -71,7 +71,7 @@ else
     echo "TestResults Hosting Failed"
     if [[ $(sudo docker images | grep "opnfv/testapi" | grep "old" | awk '{print $3}') ]]; then
         echo "Running old Image"
-        sudo docker run -dti -p "8711:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/auto" opnfv/testapi:old
+        sudo docker run -dti -p "8082:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/test" opnfv/testapi:old
         exit 1
     fi
 fi
index 70976d2..075e31f 100644 (file)
@@ -39,12 +39,12 @@ if __name__ == '__main__':
     parser.add_argument('-ru', '--resource-listing-url',
                         type=str,
                         required=False,
-                        default='http://testresults.opnfv.org/auto/swagger/spec.json',
+                        default='http://testresults.opnfv.org/test/swagger/spec.json',
                         help='Resource Listing Spec File')
     parser.add_argument('-au', '--api-declaration-url',
                         type=str,
                         required=False,
-                        default='http://testresults.opnfv.org/auto/swagger/spec',
+                        default='http://testresults.opnfv.org/test/swagger/spec',
                         help='API Declaration Spec File')
     parser.add_argument('-o', '--output-directory',
                         required=True,
index 5f6c3df..a2628e2 100644 (file)
@@ -172,8 +172,7 @@ class GenericApiHandler(RequestHandler):
                                 .format(new_query, self.table))
 
         # we merge the whole document """
-        edit_request = data.format()
-        edit_request.update(self._update_requests(data))
+        edit_request = self._update_requests(data)
 
         """ Updating the DB """
         yield self._eval_db(self.table, 'update', query, edit_request,
@@ -188,7 +187,10 @@ class GenericApiHandler(RequestHandler):
                                            data.__getattribute__(k))
         if not request:
             raise HTTPError(HTTP_FORBIDDEN, "Nothing to update")
-        return request
+
+        edit_request = data.format()
+        edit_request.update(request)
+        return edit_request
 
     @staticmethod
     def _update_request(edit_request, key, new_value, old_value):
index a9b89eb..a8c1a94 100644 (file)
@@ -1,6 +1,7 @@
 from opnfv_testapi.common.constants import HTTP_FORBIDDEN
 from opnfv_testapi.resources.handlers import GenericApiHandler
 from opnfv_testapi.resources.scenario_models import Scenario
+import opnfv_testapi.resources.scenario_models as models
 from opnfv_testapi.tornado_swagger import swagger
 
 
@@ -104,11 +105,169 @@ class ScenarioGURHandler(GenericScenarioHandler):
         """
             @description: update a single scenario by name
             @param body: fields to be updated
-            @type body: L{ScenarioCreateRequest}
+            @type body: L{ScenarioUpdateRequest}
             @in body: body
             @rtype: L{Scenario}
             @return 200: update success
             @raise 404: scenario not exist
             @raise 403: nothing to update
         """
-        pass
+        query = {'name': name}
+        db_keys = ['name']
+        self._update(query, db_keys)
+
+    def _update_query(self, keys, data):
+        query = dict()
+        equal = True
+        if self._is_rename():
+            new = self._term.get('name')
+            if data.name != new:
+                equal = False
+                query['name'] = new
+
+        return equal, query
+
+    def _update_requests(self, data):
+        updates = {
+            ('name', 'update'): self._update_requests_rename,
+            ('installer', 'add'): self._update_requests_add_installer,
+            ('installer', 'delete'): self._update_requests_delete_installer,
+            ('version', 'add'): self._update_requests_add_version,
+            ('version', 'delete'): self._update_requests_delete_version,
+            ('owner', 'update'): self._update_requests_change_owner,
+            ('project', 'add'): self._update_requests_add_project,
+            ('project', 'delete'): self._update_requests_delete_project,
+            ('customs', 'add'): self._update_requests_add_customs,
+            ('customs', 'delete'): self._update_requests_delete_customs,
+            ('score', 'add'): self._update_requests_add_score,
+            ('trust_indicator', 'add'): self._update_requests_add_ti,
+        }
+
+        updates[(self._field, self._op)](data)
+
+        return data.format()
+
+    def _iter_installers(xstep):
+        def magic(self, data):
+            [xstep(self, installer)
+             for installer in self._filter_installers(data.installers)]
+        return magic
+
+    def _iter_versions(xstep):
+        def magic(self, installer):
+            [xstep(self, version)
+             for version in (self._filter_versions(installer.versions))]
+        return magic
+
+    def _iter_projects(xstep):
+        def magic(self, version):
+            [xstep(self, project)
+             for project in (self._filter_projects(version.projects))]
+        return magic
+
+    def _update_requests_rename(self, data):
+        data.name = self._term.get('name')
+
+    def _update_requests_add_installer(self, data):
+        data.installers.append(models.ScenarioInstaller.from_dict(self._term))
+
+    def _update_requests_delete_installer(self, data):
+        data.installers = self._remove_installers(data.installers)
+
+    @_iter_installers
+    def _update_requests_add_version(self, installer):
+        installer.versions.append(models.ScenarioVersion.from_dict(self._term))
+
+    @_iter_installers
+    def _update_requests_delete_version(self, installer):
+        installer.versions = self._remove_versions(installer.versions)
+
+    @_iter_installers
+    @_iter_versions
+    def _update_requests_change_owner(self, version):
+        version.owner = self._term.get('owner')
+
+    @_iter_installers
+    @_iter_versions
+    def _update_requests_add_project(self, version):
+        version.projects.append(models.ScenarioProject.from_dict(self._term))
+
+    @_iter_installers
+    @_iter_versions
+    def _update_requests_delete_project(self, version):
+        version.projects = self._remove_projects(version.projects)
+
+    @_iter_installers
+    @_iter_versions
+    @_iter_projects
+    def _update_requests_add_customs(self, project):
+        project.customs = list(set(project.customs + self._term))
+
+    @_iter_installers
+    @_iter_versions
+    @_iter_projects
+    def _update_requests_delete_customs(self, project):
+        project.customs = filter(
+            lambda f: f not in self._term,
+            project.customs)
+
+    @_iter_installers
+    @_iter_versions
+    @_iter_projects
+    def _update_requests_add_score(self, project):
+        project.scores.append(
+            models.ScenarioScore.from_dict(self._term))
+
+    @_iter_installers
+    @_iter_versions
+    @_iter_projects
+    def _update_requests_add_ti(self, project):
+        project.trust_indicators.append(
+            models.ScenarioTI.from_dict(self._term))
+
+    def _is_rename(self):
+        return self._field == 'name' and self._op == 'update'
+
+    def _remove_installers(self, installers):
+        return self._remove('installer', installers)
+
+    def _filter_installers(self, installers):
+        return self._filter('installer', installers)
+
+    def _remove_versions(self, versions):
+        return self._remove('version', versions)
+
+    def _filter_versions(self, versions):
+        return self._filter('version', versions)
+
+    def _remove_projects(self, projects):
+        return self._remove('project', projects)
+
+    def _filter_projects(self, projects):
+        return self._filter('project', projects)
+
+    def _remove(self, field, fields):
+        return filter(
+            lambda f: getattr(f, field) != self._locate.get(field),
+            fields)
+
+    def _filter(self, field, fields):
+        return filter(
+            lambda f: getattr(f, field) == self._locate.get(field),
+            fields)
+
+    @property
+    def _field(self):
+        return self.json_args.get('field')
+
+    @property
+    def _op(self):
+        return self.json_args.get('op')
+
+    @property
+    def _locate(self):
+        return self.json_args.get('locate')
+
+    @property
+    def _term(self):
+        return self.json_args.get('term')
index f89a124..73bcbe9 100644 (file)
@@ -2,6 +2,14 @@ import models
 from opnfv_testapi.tornado_swagger import swagger
 
 
+def list_default(value):
+    return value if value else list()
+
+
+def dict_default(value):
+    return value if value else dict()
+
+
 @swagger.model()
 class ScenarioTI(models.ModelBase):
     def __init__(self, date=None, status='silver'):
@@ -32,9 +40,9 @@ class ScenarioProject(models.ModelBase):
                  scores=None,
                  trust_indicators=None):
         self.project = project
-        self.customs = customs
-        self.scores = scores
-        self.trust_indicators = trust_indicators
+        self.customs = list_default(customs)
+        self.scores = list_default(scores)
+        self.trust_indicators = list_default(trust_indicators)
 
     @staticmethod
     def attr_parser():
@@ -50,7 +58,7 @@ class ScenarioVersion(models.ModelBase):
     """
    def __init__(self, version=None, projects=None):
        # version: version name string (e.g. 'master');
        # projects: list of ScenarioProject entries — presumably, per the
        #           class attr_parser; confirm in the full file.
        self.version = version
        # list_default() guarantees the attribute is always a list and
        # avoids the shared-mutable-default pitfall.
        self.projects = list_default(projects)
 
     @staticmethod
     def attr_parser():
@@ -65,7 +73,7 @@ class ScenarioInstaller(models.ModelBase):
     """
    def __init__(self, installer=None, versions=None):
        # installer: installer name string (e.g. 'apex');
        # versions: list of ScenarioVersion entries — presumably, per the
        #           class attr_parser; confirm in the full file.
        self.installer = installer
        # Normalized via list_default so the attribute is always a list.
        self.versions = list_default(versions)
 
     @staticmethod
     def attr_parser():
@@ -80,13 +88,28 @@ class ScenarioCreateRequest(models.ModelBase):
     """
    def __init__(self, name='', installers=None):
        # name: scenario name; the create handler rejects empty/None names
        #       (unit tests assert a 'name missing' error).
        # installers: list of ScenarioInstaller entries.
        self.name = name
        self.installers = list_default(installers)
 
    @staticmethod
    def attr_parser():
        # Maps nested-member names to their model class — presumably
        # consumed by ModelBase deserialization (from_dict); each item of
        # 'installers' is rebuilt as a ScenarioInstaller.
        return {'installers': ScenarioInstaller}
 
 
@swagger.model()
class ScenarioUpdateRequest(models.ModelBase):
    """
        @property field: update field
        @property op: add/delete/update
        @property locate: information used to locate the field
        @property term: new value
    """
    def __init__(self, field=None, op=None, locate=None, term=None):
        self.field = field
        self.op = op
        # dict_default() avoids shared mutable defaults.  Note that 'term'
        # is not always a dict: the unit tests pass list-typed terms for
        # customs operations; those pass through dict_default unchanged
        # because only falsy values are replaced.
        self.locate = dict_default(locate)
        self.term = dict_default(term)
+
+
 @swagger.model()
 class Scenario(models.ModelBase):
     """
@@ -97,7 +120,7 @@ class Scenario(models.ModelBase):
         self.name = name
         self._id = _id
         self.creation_date = create_date
-        self.installers = installers if installers else list()
+        self.installers = list_default(installers)
 
     @staticmethod
     def attr_parser():
index ff59795..c15dc32 100644 (file)
@@ -1,20 +1,20 @@
+from copy import deepcopy
 import json
 import os
+from datetime import datetime
 
 from opnfv_testapi.common.constants import HTTP_BAD_REQUEST
 from opnfv_testapi.common.constants import HTTP_FORBIDDEN
 from opnfv_testapi.common.constants import HTTP_OK
-from opnfv_testapi.resources.scenario_models import Scenario
-from opnfv_testapi.resources.scenario_models import ScenarioCreateRequest
-from opnfv_testapi.resources.scenario_models import Scenarios
+import opnfv_testapi.resources.scenario_models as models
 from test_testcase import TestBase
 
 
 class TestScenarioBase(TestBase):
     def setUp(self):
         super(TestScenarioBase, self).setUp()
-        self.get_res = Scenario
-        self.list_res = Scenarios
+        self.get_res = models.Scenario
+        self.list_res = models.Scenarios
         self.basePath = '/api/v1/scenarios'
         self.req_d = self._load_request('scenario-c1.json')
         self.req_2 = self._load_request('scenario-c2.json')
@@ -46,6 +46,17 @@ class TestScenarioBase(TestBase):
         self.assertIsNotNone(scenario_dict['creation_date'])
         self.assertDictContainsSubset(req, scenario_dict)
 
+    @staticmethod
+    def _set_query(*args):
+        uri = ''
+        for arg in args:
+            uri += arg + '&'
+        return uri[0: -1]
+
    def _get_and_assert(self, name, req=None):
        # GET the scenario identified by *name* and verify the response
        # code and body against the original creation request *req*
        # (delegates the comparison to assert_res).
        code, body = self.get(name)
        self.assert_res(code, body, req)
+
 
 class TestScenarioCreate(TestScenarioBase):
     def test_withoutBody(self):
@@ -53,13 +64,13 @@ class TestScenarioCreate(TestScenarioBase):
         self.assertEqual(code, HTTP_BAD_REQUEST)
 
     def test_emptyName(self):
-        req_empty = ScenarioCreateRequest('')
+        req_empty = models.ScenarioCreateRequest('')
         (code, body) = self.create(req_empty)
         self.assertEqual(code, HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_noneName(self):
-        req_none = ScenarioCreateRequest(None)
+        req_none = models.ScenarioCreateRequest(None)
         (code, body) = self.create(req_none)
         self.assertEqual(code, HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
@@ -83,8 +94,7 @@ class TestScenarioGet(TestScenarioBase):
         self.scenario_2 = self.create_return_name(self.req_2)
 
    def test_getByName(self):
        # GET /scenarios/<name> returns the scenario created from req_d.
        self._get_and_assert(self.scenario_1, self.req_d)
 
    def test_getAll(self):
        # An unfiltered query returns both scenarios created in setUp.
        self._query_and_assert(query=None, reqs=[self.req_d, self.req_2])
@@ -113,13 +123,6 @@ class TestScenarioGet(TestScenarioBase):
 
         self._query_and_assert(query, reqs=[self.req_d])
 
-    @staticmethod
-    def _set_query(*args):
-        uri = ''
-        for arg in args:
-            uri += arg + '&'
-        return uri[0: -1]
-
     def _query_and_assert(self, query, found=True, reqs=None):
         code, body = self.query(query)
         if not found:
@@ -131,3 +134,172 @@ class TestScenarioGet(TestScenarioBase):
                 for scenario in body.scenarios:
                     if req['name'] == scenario.name:
                         self.assert_res(code, scenario, req)
+
+
class TestScenarioUpdate(TestScenarioBase):
    """Exercise the scenario update API (rename/add/delete/update ops).

    Each test mutates a deep copy of the creation request (req_d) into
    the expected post-update document, issues the matching
    ScenarioUpdateRequest, then re-fetches the scenario and compares.

    Fix over the original: filter(...)[0] indexing and filter()
    reassignment only work on Python 2 (py3 filter() returns an
    iterator); list comprehensions and the _find_project helper behave
    identically on both.
    """
    def setUp(self):
        super(TestScenarioUpdate, self).setUp()
        self.scenario = self.create_return_name(self.req_d)

    def _execute(set_update):
        # Class-body decorator (hence no 'self' parameter): the wrapped
        # test returns (update_request, expected_scenario); the wrapper
        # performs the update and the final assertion.
        def magic(self):
            update, scenario = set_update(self, deepcopy(self.req_d))
            self._update_and_assert(update, scenario)
        return magic

    @staticmethod
    def _find_project(projects, name):
        # First project dict with the given project name.  List
        # comprehension instead of filter(...)[0]: py3 filter() yields an
        # iterator, which cannot be indexed.
        return [p for p in projects if p['project'] == name][0]

    def test_renameScenario(self):
        new_name = 'nosdn-nofeature-noha'
        new_scenario = deepcopy(self.req_d)
        new_scenario['name'] = new_name
        update_req = models.ScenarioUpdateRequest(field='name',
                                                  op='update',
                                                  locate={},
                                                  term={'name': new_name})
        # Renaming changes the lookup key, so pass the new name explicitly.
        self._update_and_assert(update_req, new_scenario, new_name)

    @_execute
    def test_addInstaller(self, scenario):
        add = models.ScenarioInstaller(installer='daisy', versions=list())
        scenario['installers'].append(add.format())
        update = models.ScenarioUpdateRequest(field='installer',
                                              op='add',
                                              locate={},
                                              term=add.format())
        return update, scenario

    @_execute
    def test_deleteInstaller(self, scenario):
        scenario['installers'] = [i for i in scenario['installers']
                                  if i['installer'] != 'apex']

        update = models.ScenarioUpdateRequest(field='installer',
                                              op='delete',
                                              locate={'installer': 'apex'})
        return update, scenario

    @_execute
    def test_addVersion(self, scenario):
        add = models.ScenarioVersion(version='danube', projects=list())
        scenario['installers'][0]['versions'].append(add.format())
        update = models.ScenarioUpdateRequest(field='version',
                                              op='add',
                                              locate={'installer': 'apex'},
                                              term=add.format())
        return update, scenario

    @_execute
    def test_deleteVersion(self, scenario):
        versions = scenario['installers'][0]['versions']
        scenario['installers'][0]['versions'] = [
            v for v in versions if v['version'] != 'master']

        update = models.ScenarioUpdateRequest(field='version',
                                              op='delete',
                                              locate={'installer': 'apex',
                                                      'version': 'master'})
        return update, scenario

    @_execute
    def test_changeOwner(self, scenario):
        scenario['installers'][0]['versions'][0]['owner'] = 'lucy'

        update = models.ScenarioUpdateRequest(field='owner',
                                              op='update',
                                              locate={'installer': 'apex',
                                                      'version': 'master'},
                                              term={'owner': 'lucy'})
        return update, scenario

    @_execute
    def test_addProject(self, scenario):
        add = models.ScenarioProject(project='qtip').format()
        scenario['installers'][0]['versions'][0]['projects'].append(add)
        update = models.ScenarioUpdateRequest(field='project',
                                              op='add',
                                              locate={'installer': 'apex',
                                                      'version': 'master'},
                                              term=add)
        return update, scenario

    @_execute
    def test_deleteProject(self, scenario):
        projects = scenario['installers'][0]['versions'][0]['projects']
        scenario['installers'][0]['versions'][0]['projects'] = [
            p for p in projects if p['project'] != 'functest']

        update = models.ScenarioUpdateRequest(field='project',
                                              op='delete',
                                              locate={
                                                  'installer': 'apex',
                                                  'version': 'master',
                                                  'project': 'functest'})
        return update, scenario

    @_execute
    def test_addCustoms(self, scenario):
        add = ['odl', 'parser', 'vping_ssh']
        projects = scenario['installers'][0]['versions'][0]['projects']
        functest = self._find_project(projects, 'functest')
        # Expected result is the union with the pre-existing customs
        # (see scenario-c1.json fixture) — confirm against the fixture.
        functest['customs'] = ['healthcheck', 'odl', 'parser', 'vping_ssh']
        update = models.ScenarioUpdateRequest(field='customs',
                                              op='add',
                                              locate={
                                                  'installer': 'apex',
                                                  'version': 'master',
                                                  'project': 'functest'},
                                              term=add)
        return update, scenario

    @_execute
    def test_deleteCustoms(self, scenario):
        projects = scenario['installers'][0]['versions'][0]['projects']
        functest = self._find_project(projects, 'functest')
        functest['customs'] = ['healthcheck']
        update = models.ScenarioUpdateRequest(field='customs',
                                              op='delete',
                                              locate={
                                                  'installer': 'apex',
                                                  'version': 'master',
                                                  'project': 'functest'},
                                              term=['vping_ssh'])
        return update, scenario

    @_execute
    def test_addScore(self, scenario):
        add = models.ScenarioScore(date=str(datetime.now()), score='11/12')
        projects = scenario['installers'][0]['versions'][0]['projects']
        functest = self._find_project(projects, 'functest')
        functest['scores'].append(add.format())
        update = models.ScenarioUpdateRequest(field='score',
                                              op='add',
                                              locate={
                                                  'installer': 'apex',
                                                  'version': 'master',
                                                  'project': 'functest'},
                                              term=add.format())
        return update, scenario

    @_execute
    def test_addTi(self, scenario):
        add = models.ScenarioTI(date=str(datetime.now()), status='gold')
        projects = scenario['installers'][0]['versions'][0]['projects']
        functest = self._find_project(projects, 'functest')
        functest['trust_indicators'].append(add.format())
        update = models.ScenarioUpdateRequest(field='trust_indicator',
                                              op='add',
                                              locate={
                                                  'installer': 'apex',
                                                  'version': 'master',
                                                  'project': 'functest'},
                                              term=add.format())
        return update, scenario

    def _update_and_assert(self, update_req, new_scenario, name=None):
        # POST the update, expect HTTP 200, then re-fetch (under the
        # possibly renamed scenario) and compare with the expectation.
        code, _ = self.update(update_req, self.scenario)
        self.assertEqual(code, HTTP_OK)
        self._get_and_assert(self._none_default(name, self.scenario),
                             new_scenario)

    @staticmethod
    def _none_default(check, default):
        # Fall back to *default* when *check* is falsy (e.g. no rename).
        return check if check else default