Merge "add qtip reporting"
author Morgan Richomme <morgan.richomme@orange.com>
Wed, 10 May 2017 12:23:05 +0000 (12:23 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Wed, 10 May 2017 12:23:05 +0000 (12:23 +0000)
86 files changed:
jjb/3rd_party_ci/download-netvirt-artifact.sh
jjb/3rd_party_ci/odl-netvirt.yml
jjb/apex/apex-iso-verify.sh [new file with mode: 0755]
jjb/apex/apex-snapshot-deploy.sh
jjb/apex/apex-upload-artifact.sh
jjb/apex/apex.yml
jjb/armband/armband-ci-jobs.yml
jjb/armband/armband-deploy.sh
jjb/bottlenecks/bottlenecks-project-jobs.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-dovetail-jobs.yml
jjb/compass4nfv/compass-project-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/cperf/cperf-ci-jobs.yml
jjb/daisy4nfv/daisy4nfv-basic.sh
jjb/daisy4nfv/daisy4nfv-build.sh
jjb/daisy4nfv/daisy4nfv-download-artifact.sh
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/dovetail/dovetail-weekly-jobs.yml
jjb/fuel/fuel-daily-jobs.yml
jjb/functest/functest-daily-jobs.yml
jjb/functest/functest-loop.sh
jjb/functest/set-functest-env.sh
jjb/global/slave-params.yml
jjb/joid/joid-daily-jobs.yml
jjb/kvmfornfv/kvmfornfv.yml
jjb/opera/opera-daily-jobs.yml
jjb/releng/opnfv-docker-arm.yml
jjb/releng/opnfv-docker.sh
jjb/securedlab/check-jinja2.sh [new file with mode: 0755]
jjb/securedlab/check-jinja2.yml [new file with mode: 0644]
jjb/xci/bifrost-provision.sh
jjb/xci/bifrost-verify.sh
jjb/yardstick/yardstick-ci-jobs.yml
modules/opnfv/deployment/compass/adapter.py
prototypes/bifrost/playbooks/opnfv-virtual.yaml
prototypes/bifrost/scripts/bifrost-provision.sh
prototypes/bifrost/scripts/destroy-env.sh
prototypes/openstack-ansible/playbooks/configure-targethosts.yml
prototypes/xci/file/exports [deleted file]
prototypes/xci/file/ha/flavor-vars.yml
prototypes/xci/file/ha/openstack_user_config.yml
prototypes/xci/file/install-ansible.sh [new file with mode: 0644]
prototypes/xci/file/mini/configure-targethosts.yml [deleted file]
prototypes/xci/file/mini/flavor-vars.yml
prototypes/xci/file/mini/openstack_user_config.yml
prototypes/xci/file/modules [deleted file]
prototypes/xci/file/noha/configure-targethosts.yml [deleted file]
prototypes/xci/file/noha/flavor-vars.yml
prototypes/xci/file/noha/openstack_user_config.yml
prototypes/xci/playbooks/configure-localhost.yml
prototypes/xci/playbooks/configure-opnfvhost.yml
prototypes/xci/playbooks/configure-targethosts.yml [moved from prototypes/xci/file/ha/configure-targethosts.yml with 62% similarity]
prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml [new file with mode: 0644]
prototypes/xci/template/compute.interface.j2
prototypes/xci/template/controller.interface.j2
prototypes/xci/template/opnfv.interface.j2
prototypes/xci/xci-deploy.sh
utils/create_pod_file.py [new file with mode: 0644]
utils/fetch_os_creds.sh
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/template/index-status-tmpl.html
utils/test/reporting/reporting.yaml
utils/test/reporting/utils/reporting_utils.py
utils/test/testapi/opnfv_testapi/common/check.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
utils/test/testapi/opnfv_testapi/resources/project_handlers.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
utils/test/testapi/opnfv_testapi/resources/testcase_models.py
utils/test/testapi/opnfv_testapi/tests/unit/executor.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/tests/unit/test_base.py
utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py
utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py
utils/test/testapi/opnfv_testapi/tests/unit/test_project.py
utils/test/testapi/opnfv_testapi/tests/unit/test_result.py
utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
utils/test/testapi/opnfv_testapi/tests/unit/test_version.py

index 6aea01d..7ecf8d7 100755 (executable)
@@ -6,11 +6,18 @@ set -o pipefail
 ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
 
 echo "Attempting to fetch the artifact location from ODL Jenkins"
-CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
+if [ "$ODL_BRANCH" != 'master' ]; then
+  DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
+  ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
+else
+  DIST='nitrogen'
+fi
+CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
 # due to a limitation in the Jenkins Gerrit Trigger, we need to use the Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-carbon/[0-9]+' | tail -1 | grep -Eo [0-9]+)
+ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo "[0-9]+")
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
 
-NETVIRT_ARTIFACT_URL="https://jenkins.opendaylight.org/releng/job/netvirt-distribution-check-carbon/${ODL_BUILD_JOB_NUM}/artifact/${ODL_ZIP}"
 echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
 
 echo "Downloading the artifact. This could take time..."
index 470e433..a937acb 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
+        - carbon:
+            branch: 'stable/carbon'
+            gs-pathname: ''
+            disabled: false
 #####################################
 # patch verification phases
 #####################################
                 - name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
                   current-parameters: false
                   predefined-parameters: |
+                    ODL_BRANCH={branch}
                     BRANCH=$BRANCH
                     GERRIT_REFSPEC=$GERRIT_REFSPEC
                     GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
             name: functest
             condition: SUCCESSFUL
             projects:
-                - name: 'functest-netvirt-virtual-suite-{stream}'
+                - name: 'functest-netvirt-virtual-suite-master'
                   predefined-parameters: |
                     DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
-                    FUNCTEST_SUITE_NAME=tempest_smoke_serial
+                    FUNCTEST_SUITE_NAME=odl_netvirt
                     RC_FILE_PATH=$HOME/cloner-info/overcloudrc
                   node-parameters: true
                   kill-phase-on: FAILURE
diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755 (executable)
index 0000000..cdeac04
--- /dev/null
@@ -0,0 +1,105 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+
+source $BUILD_DIRECTORY/../opnfv.properties
+
+if ! rpm -q virt-install > /dev/null; then
+  sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+    sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+    sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
+
+# Attach to tcp port 3737 and echo the output to stdout;
+# watch for a 5 min timeout, a power-off message or a tcp disconnect
+python_rc=0
+python << EOP || python_rc=$?
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+    print "Failed to connect to the iso-verofy vm's serial console"
+    print "this probably means that the VM failed to start"
+    raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+    try:
+        if data != last_data:
+            activity = time()
+        last_data = data
+        data = s.recv(BUFFER_SIZE)
+        sys.stdout.write(data)
+        if 'Powering off' in data:
+            break
+        sleep(.5)
+    except socket.error, e:
+        # for now assuming that the connection was closed
+        # which is good, means the vm finished installing
+        # printing the error output just in case we need to debug
+        print "VM console connection lost: %s" % msg
+        break
+s.close()
+
+if time() - activity > 300:
+    print "failing due to console inactivity"
+    exit(1)
+else:
+    print "Success!"
+EOP
+
+# python_rc was captured on the heredoc invocation above; capturing it
+# there keeps errexit from aborting the script before cleanup can run
+
+# clean up
+rm_apex_iso_verify
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+
+# Exit with the RC of the Python job
+exit $python_rc
index 06c0023..3eb3cf2 100644 (file)
@@ -129,6 +129,7 @@ if [ -z "$virsh_vm_defs" ]; then
 fi
 
 for node_def in ${virsh_vm_defs}; do
+  sed  -ri "s/machine='[^\s]+'/machine='pc'/" ${node_def}
   sudo virsh define ${node_def}
   node=$(echo ${node_def} | awk -F '.' '{print $1}')
   sudo cp -f ${node}.qcow2 /var/lib/libvirt/images/
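The sed call added above normalizes the machine type so domain XML captured with one QEMU version can be defined under another. Roughly, given a domain XML line such as this (machine value illustrative):

    echo "<type arch='x86_64' machine='pc-i440fx-rhel7.3.0'>hvm</type>" \
      | sed -r "s/machine='[^\s]+'/machine='pc'/"
    # -> <type arch='x86_64' machine='pc'>hvm</type>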
index c2de7d7..d046c11 100755 (executable)
@@ -3,8 +3,13 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+if [ -z "$ARTIFACT_TYPE" ]; then
+  echo "ERROR: ARTIFACT_TYPE not provided...exiting"
+  exit 1
+fi
+
 # log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex ${ARTIFACT_TYPE} artifact. This could take some time..."
 echo "--------------------------------------------------------"
 echo
 
@@ -18,7 +23,7 @@ echo "Cloning releng repository..."
 [ -d releng ] && rm -rf releng
 git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
# this is where we import the signing key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then 
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
   source $WORKSPACE/releng/utils/gpg_import_key.sh
 fi
 
@@ -45,32 +50,18 @@ echo "ISO signature Upload Complete!"
 }
 
 uploadiso () {
-# upload artifact and additional files to google storage
-gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
-echo "ISO Upload Complete!"
-RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
-RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
-    RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
-SRPM_INSTALL_PATH=$BUILD_DIRECTORY
-SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
-    SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
+  gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
+  echo "ISO Upload Complete!"
 }
 
 uploadrpm () {
-#This is where we upload the rpms
-for artifact in $RPM_LIST $SRPM_LIST; do
-  echo "Uploading artifact: ${artifact}"
-  gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
-  echo "Upload complete for ${artifact}"
-done
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
+  for artifact in $RPM_LIST $SRPM_LIST; do
+    echo "Uploading artifact: ${artifact}"
+    gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
+    echo "Upload complete for ${artifact}"
+  done
+  gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
+  gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
 }
 
 uploadsnap () {
@@ -84,21 +75,43 @@ uploadsnap () {
   echo "Upload complete for Snapshot"
 }
 
-if echo $WORKSPACE | grep promote > /dev/null; then
-  uploadsnap
-elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
   echo "Signing Key avaliable"
-  signiso
+  SIGN_ARTIFACT="true"
+fi
+
+if [ "$ARTIFACT_TYPE" == 'snapshot' ]; then
+  uploadsnap
+elif [ "$ARTIFACT_TYPE" == 'iso' ]; then
+  if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+    signiso
+  fi
   uploadiso
-  signrpm
+elif [ "$ARTIFACT_TYPE" == 'rpm' ]; then
+  RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
+  RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
+  VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
+  for pkg in common undercloud; do # removed onos for danube
+    RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+  done
+  SRPM_INSTALL_PATH=$BUILD_DIRECTORY
+  SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
+  VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
+  for pkg in common undercloud; do # removed onos for danube
+    SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+  done
+
+  if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+    signrpm
+  fi
   uploadrpm
 else
-  uploadiso
-  uploadrpm
+  echo "ERROR: Unknown artifact type ${ARTIFACT_TYPE} to upload...exiting"
+  exit 1
 fi
 
 echo
 echo "--------------------------------------------------------"
 echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$ARTIFACT_TYPE" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$ARTIFACT_TYPE" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
index e7982ba..7ca2e6e 100644 (file)
@@ -12,6 +12,7 @@
         - 'apex-daily-{stream}'
         - 'apex-csit-promote-daily-{stream}'
         - 'apex-fdio-promote-daily-{stream}'
+        - 'apex-verify-iso-{stream}'
 
     # stream:    branch with - in place of / (eg. stable-arno)
     # branch:    branch (eg. stable/arno)
             git-revision: false
             same-node: true
             block: true
+        - inject:
+           properties-content: ARTIFACT_TYPE=rpm
+        - 'apex-upload-artifact'
+        - trigger-builds:
+          - project: 'apex-verify-iso-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+            git-revision: false
+            block: true
+            same-node: true
+        - inject:
+           properties-content: ARTIFACT_TYPE=iso
         - 'apex-upload-artifact'
 
+# ISO verify job
+- job-template:
+    name: 'apex-verify-iso-{stream}'
+
+    # Job template for builds
+    #
+    # Required Variables:
+    #     stream:    branch with - in place of / (eg. stable)
+    #     branch:    branch (eg. stable)
+    node: '{daily-slave}'
+
+    disabled: false
+
+    concurrent: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - apex-parameter:
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: "Used for overriding the GIT URL coming from parameters macro."
+
+    scm:
+        - git-scm
+
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-deploy.*'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
+
+    builders:
+        - 'apex-iso-verify'
+
 - job-template:
     name: 'apex-deploy-virtual-{scenario}-{stream}'
 
         # 4.not used for release criteria or compliance,
         #   only to debug the dovetail tool bugs with apex
         #- trigger-builds:
-        #    - project: 'dovetail-apex-{slave}-debug-{stream}'
+        #    - project: 'dovetail-apex-{slave}-proposed_tests-{stream}'
         #      current-parameters: false
         #      predefined-parameters:
         #        DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-os-odl_l3-fdio-noha-{stream}'
+          - project: 'apex-deploy-baremetal-os-odl_l3-fdio-ha-{stream}'
             predefined-parameters: |
               BUILD_DIRECTORY=apex-build-{stream}/.build
               OPNFV_CLEAN=yes
         - trigger-builds:
           - project: 'functest-apex-{daily-slave}-daily-{stream}'
             predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+              DEPLOY_SCENARIO=os-odl_l3-fdio-ha
             block: true
             same-node: true
             block-thresholds:
         - trigger-builds:
           - project: 'yardstick-apex-{slave}-daily-{stream}'
             predefined-parameters:
-              DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+              DEPLOY_SCENARIO=os-odl_l3-fdio-ha
             block: true
             same-node: true
             block-thresholds:
             same-node: true
         - shell:
             !include-raw-escape: ./apex-snapshot-create.sh
-        - shell:
-            !include-raw-escape: ./apex-upload-artifact.sh
+        - inject:
+           properties-content: ARTIFACT_TYPE=snapshot
+        - 'apex-upload-artifact'
 
 # FDIO promote
 - job-template:
             same-node: true
         - shell:
             !include-raw-escape: ./apex-snapshot-create.sh
-        - shell:
-            !include-raw-escape: ./apex-upload-artifact.sh
+        - inject:
+           properties-content: ARTIFACT_TYPE=snapshot
+        - 'apex-upload-artifact'
 
 - job-template:
     name: 'apex-gs-clean-{stream}'
         - shell:
             !include-raw: ./apex-workspace-cleanup.sh
 
+- builder:
+    name: 'apex-iso-verify'
+    builders:
+        - shell:
+            !include-raw: ./apex-iso-verify.sh
+
+
 - builder:
     name: 'apex-upload-artifact'
     builders:
index 38a729d..17d5204 100644 (file)
             slave-label: arm-pod3
             installer: fuel
             <<: *danube
-        - arm-pod3-2:
-            slave-label: arm-pod3-2
+        - arm-pod4:
+            slave-label: arm-pod4
+            installer: fuel
+            <<: *danube
+        - arm-virtual1:
+            slave-label: arm-virtual1
             installer: fuel
             <<: *danube
 #--------------------------------
             slave-label: arm-pod3
             installer: fuel
             <<: *master
-        - arm-pod3-2:
-            slave-label: arm-pod3-2
+        - arm-pod4:
+            slave-label: arm-pod4
+            installer: fuel
+            <<: *master
+        - arm-virtual1:
+            slave-label: arm-virtual1
             installer: fuel
             <<: *master
 #--------------------------------
         # 4.not used for release criteria or compliance,
         #   only to debug the dovetail tool bugs with arm pods
         - trigger-builds:
-            - project: 'dovetail-{installer}-{pod}-debug-{stream}'
+            - project: 'dovetail-{installer}-{pod}-proposed_tests-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 1'
+        - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 2'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 3'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 4'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 5'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 6'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-master-trigger'
     triggers:
-        - timed: '0 2 * * 7'
+        - timed: ''
 #--------------------------------------------------------------------
 # Enea Armband CI Virtual Triggers running against danube branch
 #--------------------------------------------------------------------
     name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-danube-trigger'
     triggers:
         - timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against danube branch
+#--------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-danube-trigger'
+    triggers:
+        - timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against master branch
+#--------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+    triggers:
+        - timed: ''
+
 #----------------------------------------------------------
 # Enea Armband POD 2 Triggers running against master branch
 #----------------------------------------------------------
 # Enea Armband POD 3 Triggers running against master branch (aarch64 slave)
 #--------------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-master-trigger'
     triggers:
         - timed: ''
 #--------------------------------------------------------------------------
 # Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
 #--------------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-danube-trigger'
     triggers:
         - timed: ''
index 2e5aa39..e445e08 100755 (executable)
@@ -33,10 +33,10 @@ fi
 # set deployment parameters
 export TMPDIR=${WORKSPACE}/tmpdir
 
-# arm-pod3-2 is an aarch64 jenkins slave for the same POD as the
+# arm-pod4 is an aarch64 jenkins slave for the same POD as the
 # x86 jenkins slave arm-pod3; therefore we use the same pod name
 # to deploy the pod from both jenkins slaves
-if [[ "${NODE_NAME}" == "arm-pod3-2" ]]; then
+if [[ "${NODE_NAME}" == "arm-pod4" ]]; then
     NODE_NAME="arm-pod3"
 fi
 
index a0abb93..5dced2a 100644 (file)
@@ -70,8 +70,8 @@
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
     builders:
-        - bottlenecks-hello
-        #- bottlenecks-unit-tests
+        #- bottlenecks-hello
+        - bottlenecks-unit-tests
 
 - job-template:
     name: 'bottlenecks-merge-{stream}'
             # install python packages
             easy_install -U setuptools
             easy_install -U pip
-            pip install -r requirements.txt
+            pip install -r $WORKSPACE/requirements/verify.txt
 
             # unit tests
-            /bin/bash $WORKSPACE/tests.sh
+            /bin/bash $WORKSPACE/verify.sh
 
             deactivate
 
             #!/bin/bash
             set -o errexit
 
-            echo "hello"
+            echo -e "Wellcome to Bottlenecks! \nMerge event is planning to support more functions! "
index 237f894..61845ac 100644 (file)
         #dovetail only master by now, not sync with A/B/C branches
         #here the stream means the SUT stream, dovetail stream is defined in its own job
         - trigger-builds:
-            - project: 'dovetail-compass-{pod}-debug-{stream}'
+            - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
index 30c80e6..c321655 100644 (file)
@@ -98,7 +98,7 @@
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-            - project: 'dovetail-compass-{pod}-debug-weekly-{stream}'
+            - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
index f962518..5948245 100644 (file)
             description: "URL to Google Storage."
         - string:
             name: PPA_REPO
-            default: "http://205.177.226.237:9999{ppa-pathname}"
+            default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
         - string:
             name: PPA_CACHE
             default: "$WORKSPACE/work/repo/"
index 14279e6..56f54d8 100644 (file)
             description: "URL to Google Storage."
         - string:
             name: PPA_REPO
-            default: "http://205.177.226.237:9999{ppa-pathname}"
+            default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
         - string:
             name: PPA_CACHE
             default: "$WORKSPACE/work/repo/"
index f6e0685..dc209d6 100644 (file)
                         -v of_port:6653"
             robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
 
-            docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+            docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
 
 - builder:
     name: cperf-cleanup
index 04b9b7b..87f5482 100755 (executable)
@@ -4,4 +4,3 @@ echo "--------------------------------------------------------"
 echo "This is diasy4nfv basic job!"
 echo "--------------------------------------------------------"
 
-sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/*
index 375d807..925f68e 100755 (executable)
@@ -1,5 +1,9 @@
 #!/bin/bash
 
+set -o errexit
+set -o nounset
+set -o pipefail
+
 echo "--------------------------------------------------------"
 echo "This is diasy4nfv build job!"
 echo "--------------------------------------------------------"
index 1cc0443..a64c80e 100755 (executable)
@@ -57,12 +57,18 @@ fi
 
 # log info to console
 echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
-echo "This could take some time..."
+echo "This could take some time... Now the time is $(date -u)"
 echo "--------------------------------------------------------"
 echo
 
 # download the file
-curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
+if [[ "$NODE_NAME" =~ (zte) ]] && [ -x "$(command -v aria2c)" ]; then
+    DOWNLOAD_CMD="aria2c -x 3 --allow-overwrite=true -d $WORKSPACE -o opnfv.bin"
+else
+    DOWNLOAD_CMD="curl -L -s -o $WORKSPACE/opnfv.bin"
+fi
+
+$DOWNLOAD_CMD http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
 
 # list the file
 ls -al $WORKSPACE/opnfv.bin
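The two download commands the new branch selects between are equivalent apart from transfer strategy: aria2c opens up to three connections per server (-x 3), which speeds up fetches on the zte labs, while curl remains the fallback elsewhere. Expanded, the resulting commands are:

    aria2c -x 3 --allow-overwrite=true -d $WORKSPACE -o opnfv.bin http://$OPNFV_ARTIFACT_URL
    curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL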
index c677ef9..807d436 100644 (file)
         # functest-suite-parameter
         - string:
             name: FUNCTEST_SUITE_NAME
-            default: '{project}'
+            default: 'doctor-notification'
         - string:
             name: TESTCASE_OPTIONS
             default: '-e INSPECTOR_TYPE={inspector} -e PROFILER_TYPE={profiler} -v $WORKSPACE:/home/opnfv/repos/doctor'
index 8690480..682948d 100644 (file)
             SUT: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
+        - arm-virtual1:
+            slave-label: '{pod}'
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod1:
+            slave-label: zte-pod1
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod2:
+            slave-label: zte-pod2
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod3:
+            slave-label: zte-pod3
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - zte-pod1:
+            slave-label: zte-pod1
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
+        - zte-pod3:
+            slave-label: zte-pod3
+            SUT: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
 #--------------------------------
     testsuite:
         - 'debug'
         - 'compliance_set'
+        - 'proposed_tests'
 
     jobs:
         - 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
index 5161a3c..cee9e59 100755 (executable)
@@ -32,10 +32,11 @@ if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FOR
     sudo iptables -I FORWARD -j RETURN
 fi
 
+releng_repo=${WORKSPACE}/releng
+[ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
+git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
+
 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
-    releng_repo=${WORKSPACE}/releng
-    [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
-    git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
     ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
 fi
 
@@ -47,16 +48,62 @@ else
     exit 1
 fi
 
+sudo pip install virtualenv
+
+cd ${releng_repo}/modules
+sudo virtualenv venv
+source venv/bin/activate
+sudo pip install -e ./ >/dev/null
+
+if [[ ${INSTALLER_TYPE} == compass ]]; then
+    options="-u root -p root"
+elif [[ ${INSTALLER_TYPE} == fuel ]]; then
+    options="-u root -p r00tme"
+else
+    echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
+    echo "HA test cases may not run properly."
+fi
+
+pod_file_dir="/home/opnfv/dovetail/userconfig"
+if [ -d ${pod_file_dir} ]; then
+    sudo rm -r ${pod_file_dir}/*
+else
+    sudo mkdir -p ${pod_file_dir}
+fi
+cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} -i ${INSTALLER_IP} ${options} -f ${pod_file_dir}/pod.yaml"
+echo ${cmd}
+${cmd}
+
+deactivate
+
+cd ${WORKSPACE}
+
+if [ -f ${pod_file_dir}/pod.yaml ]; then
+    echo "file ${pod_file_dir}/pod.yaml:"
+    cat ${pod_file_dir}/pod.yaml
+else
+    echo "Error: There doesn't exist file ${pod_file_dir}/pod.yaml."
+    echo "HA test cases may not run properly."
+fi
+
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
+if [ "$INSTALLER_TYPE" == "fuel" ]; then
+    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${pod_file_dir}/id_rsa
+fi
+
 opts="--privileged=true -id"
 results_envs="-v /var/run/docker.sock:/var/run/docker.sock \
               -v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results"
 openrc_volume="-v ${OPENRC}:${OPENRC}"
+userconfig_volume="-v ${pod_file_dir}:${pod_file_dir}"
 
 # Pull the image with correct tag
 echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
 docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
 
-cmd="docker run ${opts} ${results_envs} ${openrc_volume} \
+cmd="docker run ${opts} ${results_envs} ${openrc_volume} ${userconfig_volume} \
      ${sshkey} opnfv/dovetail:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
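For a compass SUT, the pod.yaml generation command composed above expands to something like this (installer IP illustrative):

    sudo python ${releng_repo}/utils/create_pod_file.py -t compass -i 192.168.200.2 \
        -u root -p root -f /home/opnfv/dovetail/userconfig/pod.yaml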
index 915feb5..700657d 100644 (file)
@@ -46,6 +46,7 @@
     testsuite:
         - 'debug'
         - 'compliance_set'
+        - 'proposed_tests'
 
     loop:
         - 'weekly':
index 32abad6..2fa8687 100644 (file)
@@ -73,8 +73,8 @@
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-odl_l2-sfc-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
-        - 'os-odl_l2-bgpvpn-ha':
-            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'os-odl_l2-bgpvpn-ha':
+        #    auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-kvm-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-ovs-ha':
     jobs:
         - 'fuel-{scenario}-{pod}-daily-{stream}'
         - 'fuel-deploy-{pod}-daily-{stream}'
+        - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
 
 ########################
 # job templates
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
 
+- job-template:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'fuel-os-.*?-{pod}-daily-.*'
+                - 'fuel-os-.*?-{pod}-weekly-.*'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: os-odl_l2-bgpvpn-ha'
+
+    triggers:
+        - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}-trigger'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: "os-odl_l2-bgpvpn-ha"
+        - fuel-ci-parameter:
+            gs-pathname: '{gs-pathname}'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - trigger-builds:
+            - project: 'fuel-deploy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              same-node: true
+              block: true
+        - trigger-builds:
+            - project: 'functest-fuel-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              same-node: true
+              block: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'yardstick-fuel-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        # 1.dovetail only master by now, not sync with A/B/C branches
+        # 2.here the stream means the SUT stream, dovetail stream is defined in its own job
+        # 3.only debug testsuite here(includes basic testcase,
+        #   i.e. refstack ipv6 vpn test cases from functest, HA test case
+        #   from yardstick)
+        # 4.not used for release criteria or compliance,
+        #   only to debug the dovetail tool bugs with fuel bgpvpn scenario
+        - trigger-builds:
+            - project: 'dovetail-fuel-{pod}-proposed_tests-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
+    publishers:
+        - email:
+            recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com matthew.lijun@huawei.com
+
+
 - job-template:
     name: 'fuel-deploy-{pod}-daily-{stream}'
 
index e8d1432..3c04a4a 100644 (file)
             slave-label: '{pod}'
             installer: fuel
             <<: *master
-        - arm-pod3-2:
+        - arm-pod4:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *master
+        - arm-virtual1:
             slave-label: '{pod}'
             installer: fuel
             <<: *master
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
-        - arm-pod3-2:
+        - arm-pod4:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *danube
+        - arm-virtual1:
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
                 - 'vims'
                 - 'multisite'
                 - 'parser'
+                - 'opera_vims'
         - string:
             name: TESTCASE_OPTIONS
             default: ''
index 893c428..869c395 100755 (executable)
@@ -1,15 +1,9 @@
 #!/bin/bash
 set +e
 
-branch=${GIT_BRANCH##*/}
-[[ "$PUSH_RESULTS_TO_DB" == "true" ]] && flags+="-r"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh -s ${flags}"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t all ${flags}"
-else
-    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+
 container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
 docker exec $container_id $cmd
 
index 05e3d57..1acf0a2 100755 (executable)
@@ -70,6 +70,15 @@ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
     -e BUILD_TAG=${BUILD_TAG} -e CI_DEBUG=${CI_DEBUG} -e DEPLOY_TYPE=${DEPLOY_TYPE}"
 
+if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-ha'* ]]; then
+    ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+    openo_msb_port=${openo_msb_port:-80}
+    openo_msb_endpoint="$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${INSTALLER_IP} \
+    'mysql -ucompass -pcompass -Dcompass -e "select package_config from cluster;" \
+    | sed s/,/\\n/g | grep openo_ip | cut -d \" -f 4'):$openo_msb_port"
+
+    envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
+fi
 
 volumes="${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol}"
 
@@ -103,12 +112,8 @@ if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
     echo "The container ${FUNCTEST_IMAGE} with ID=${container_id} has not been properly started. Exiting..."
     exit 1
 fi
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/prepare_env.sh"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/prepare_env.py start"
-else
-    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+
 echo "Executing command inside the docker: ${cmd}"
 docker exec ${container_id} ${cmd}
index 1905a09..fad06b0 100644 (file)
             default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
             description: 'Base URI to the configuration directory'
 - parameter:
-    name: 'arm-pod3-2-defaults'
+    name: 'arm-pod4-defaults'
     parameters:
         - node:
             name: SLAVE_NAME
             description: 'Slave name on Jenkins'
             allowed-slaves:
-                - arm-pod3-2
+                - arm-pod4
             default-slaves:
-                - arm-pod3-2
+                - arm-pod4
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+            description: 'Base URI to the configuration directory'
+- parameter:
+    name: 'arm-virtual1-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - arm-virtual1
+            default-slaves:
+                - arm-virtual1
         - string:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
index 7dc7189..13ea9b3 100644 (file)
         # 4.not used for release criteria or compliance,
         #   only to debug the dovetail tool bugs with joid
         #- trigger-builds:
-        #    - project: 'dovetail-joid-{pod}-debug-{stream}'
+        #    - project: 'dovetail-joid-{pod}-proposed_tests-{stream}'
         #      current-parameters: false
         #      predefined-parameters:
         #        DEPLOY_SCENARIO={scenario}
index 8d607f9..9624778 100644 (file)
@@ -11,7 +11,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 #####################################
 # patch verification phases
 #####################################
index 5d2cc03..596d377 100644 (file)
@@ -6,30 +6,32 @@
 #####################################
 # branch definitions
 #####################################
-    stream:
-        - master:
-            branch: '{stream}'
-            gs-pathname: ''
-            disabled: false
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
 
 #####################################
-# patch verification phases
+# pod definitions
 #####################################
-    phase:
-        - 'basic'
-        - 'deploy'
+    pod:
+        - virtual:
+            slave-label: 'huawei-virtual7'
+            os-version: 'xenial'
+            <<: *master
 
 #####################################
 # jobs
 #####################################
     jobs:
-        - 'opera-daily-{stream}'
-        - 'opera-daily-{phase}-{stream}'
+        - 'opera-{pod}-daily-{stream}'
+
 #####################################
 # job templates
 #####################################
 - job-template:
-    name: 'opera-daily-{stream}'
+    name: 'opera-{pod}-daily-{stream}'
 
     project-type: multijob
 
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
-        - 'huawei-virtual7-defaults'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-nosdn-openo-ha
+        - '{slave-label}-defaults'
 
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
         - multijob:
-            name: basic
+            name: deploy
             condition: SUCCESSFUL
             projects:
-                - name: 'opera-daily-basic-{stream}'
-                  current-parameters: true
+                - name: 'compass-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openo-ha
+                    COMPASS_OS_VERSION=xenial
                   node-parameters: true
                   kill-phase-on: FAILURE
                   abort-all-job: true
         - multijob:
-            name: deploy
+            name: functest
             condition: SUCCESSFUL
             projects:
-                - name: 'compass-deploy-virtual-daily-{stream}'
+                - name: 'functest-compass-{pod}-suite-{stream}'
                   current-parameters: false
                   predefined-parameters: |
                     DEPLOY_SCENARIO=os-nosdn-openo-ha
-                    COMPASS_OS_VERSION=xenial
+                    FUNCTEST_SUITE_NAME=opera_vims
                   node-parameters: true
-                  kill-phase-on: FAILURE
+                  kill-phase-on: NEVER
                   abort-all-job: true
-#        - multijob:
-#            name: functest
-#            condition: SUCCESSFUL
-#            projects:
-#                - name: 'functest-compass-baremetal-suite-{stream}'
-#                  current-parameters: false
-#                  predefined-parameters:
-#                    FUNCTEST_SUITE_NAME=opera
-#                  node-parameters: true
-#                  kill-phase-on: NEVER
-#                  abort-all-job: true
-
-- job-template:
-    name: 'opera-daily-{phase}-{stream}'
-
-    disabled: '{obj:disabled}'
-
-    concurrent: true
-
-    properties:
-        - logrotate-default
-        - throttle:
-            enabled: true
-            max-per-node: 1
-            option: 'project'
-
-    scm:
-        - git-scm
-
-    wrappers:
-        - ssh-agent-wrapper
-        - timeout:
-            timeout: 120
-            fail: true
-
-    builders:
-        - description-setter:
-            description: "Built on $NODE_NAME"
-        - '{project}-daily-{phase}-macro'
-
-#####################################
-# builder macros
-#####################################
-- builder:
-    name: 'opera-daily-basic-macro'
-    builders:
-        - shell: |
-            #!/bin/bash
-            echo "Hello world!"
-
-- builder:
-    name: 'opera-daily-deploy-macro'
-    builders:
-        - shell: |
-            #!/bin/bash
-            echo "Hello world!"
-
index ba540ed..417fc70 100644 (file)
         receivers: >
             cristina.pauna@enea.com
             alexandru.avadanii@enea.com
+    dovetail-arm-receivers: &dovetail-arm-receivers
+        receivers: >
+            cristina.pauna@enea.com
+            alexandru.avadanii@enea.com
+            alexandru.nemes@enea.com
     other-receivers: &other-receivers
         receivers: ''
 
@@ -26,6 +31,9 @@
         - 'functest':
             <<: *master
             <<: *functest-arm-receivers
+        - 'dovetail':
+            <<: *master
+            <<: *dovetail-arm-receivers
         # projects with jobs for stable
 
     jobs:
index 5d73a9d..2aa52ad 100644 (file)
@@ -75,14 +75,11 @@ echo "Current branch: $BRANCH"
 
 if [[ "$BRANCH" == "master" ]]; then
     DOCKER_TAG="latest"
+elif [[ -n "${RELEASE_VERSION-}" ]]; then
+    DOCKER_TAG=${BRANCH##*/}.${RELEASE_VERSION}
+    # e.g. danube.1.0, danube.2.0, danube.3.0
 else
-    if [[ -n "${RELEASE_VERSION-}" ]]; then
-        release=${BRANCH##*/}
-        DOCKER_TAG=${release}.${RELEASE_VERSION}
-        # e.g. colorado.1.0, colorado.2.0, colorado.3.0
-    else
-        DOCKER_TAG="stable"
-    fi
+    DOCKER_TAG="stable"
 fi
 
 # Start the build
@@ -90,6 +87,9 @@ echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
 echo "--------------------------------------------------------"
 echo
 if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
+    if [[ -n "${RELEASE_VERSION-}" ]]; then
+        DOCKER_TAG=${RELEASE_VERSION}
+    fi
     cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
 else
     cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
new file mode 100755 (executable)
index 0000000..57650ec
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+set +x
+set -o errexit
+for lab_configs in $(find labs/ -name 'pod.yaml'); do
+        while IFS= read -r jinja_templates; do
+          echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
+          ./utils/generate_config.py -y $lab_configs -j $jinja_templates
+        done < <(find installers/ -name '*.j2')
+done
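With a hypothetical securedlab layout, the nested loop expands to one rendering call per (pod.yaml, template) pair, e.g.:

    ./utils/generate_config.py -y labs/lf/pod1/pod.yaml -j installers/fuel/pod_config.yaml.j2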
diff --git a/jjb/securedlab/check-jinja2.yml b/jjb/securedlab/check-jinja2.yml
new file mode 100644 (file)
index 0000000..1e85536
--- /dev/null
@@ -0,0 +1,80 @@
+########################
+# Job configuration to validate jinja2 files
+########################
+- project:
+
+    name: validate-templates
+
+    project: 'securedlab'
+
+    jobs:
+        - 'validate-jinja2-templates-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            disabled: false
+        - danube:
+            branch: 'stable/{stream}'
+            disabled: false
+
+########################
+# job templates
+########################
+
+- job-template:
+    name: 'validate-jinja2-templates-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    parameters:
+        - project-parameter:
+            project: $GERRIT_PROJECT
+            branch: '{branch}'
+        - node:
+            name: SLAVE_NAME
+            description: Slave to execute the jinja template test
+            default-slaves:
+                - lf-build1
+            allowed-multiselect: true
+            ignore-offline-nodes: true
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'REG_EXP'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                file-paths:
+                  - compare-type: ANT
+                    pattern: 'utils/generate_config.py'
+                  - compare-type: ANT
+                    pattern: '**/*.j2'
+                  - compare-type: ANT
+                    pattern: '**/*.yaml'
+    builders:
+        - check-jinja
+
+- builder:
+    name: check-jinja
+    builders:
+        - shell:
+            !include-raw-escape: ./check-jinja2.sh
index 4724c2e..b37da90 100755 (executable)
@@ -82,13 +82,13 @@ sudo -E ./scripts/destroy-env.sh
 
 # provision VMs for the flavor
 cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
 source env-vars
 ironic node-list
-virsh list
+sudo -H -E virsh list
 
 echo "OpenStack nodes are provisioned!"
 # here we have to do something in order to capture what was the working sha1
index f596d75..18019a7 100755 (executable)
@@ -113,14 +113,14 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
 
 # cleanup remnants of previous deployment
 cd /opt/bifrost
-sudo -E ./scripts/destroy-env.sh
+sudo -H -E ./scripts/destroy-env.sh
 
 # provision 3 VMs; xcimaster, controller, and compute
 cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
 source env-vars
 ironic node-list
-virsh list
+sudo -H -E virsh list
index 1f2f312..5ff36f8 100644 (file)
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
+        - arm-virtual1:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - arm-virtual1:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
         - orange-pod2:
             slave-label: '{pod}'
             installer: joid
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+    name: 'yardstick-params-arm-virtual1'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-joid-baremetal'
     parameters:
index 856c7fc..38aa452 100644 (file)
@@ -7,6 +7,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 
+import json
 import netaddr
 import re
 
@@ -161,9 +162,10 @@ class CompassAdapter(manager.DeploymentHandler):
             fields = lines[i].strip().encode().rsplit('\t')
             host_id = fields[0].strip().encode()
             name = 'host{0}'.format(host_id)
-            node_roles = fields[1].strip().encode().lower()
+            node_roles_str = fields[1].strip().encode().lower()
+            node_roles_list = json.loads(node_roles_str)
             node_roles = [manager.Role.ODL if x == 'odl'
-                          else x for x in node_roles]
+                          else x for x in node_roles_list]
             roles = [x for x in [manager.Role.CONTROLLER,
                                  manager.Role.COMPUTE,
                                  manager.Role.ODL,
index 310eca8..94de628 100644 (file)
@@ -35,6 +35,7 @@
   become: yes
   gather_facts: yes
   roles:
+    - role: bifrost-keystone-install
     - role: bifrost-ironic-install
       cleaning: false
       testing: true
       dib_elements: "vm enable-serial-console simple-init devuser growroot {{ extra_dib_elements }}"
       dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
       when: create_image_via_dib | bool == true and transform_boot_image | bool == false
+    - role: bifrost-keystone-client-config
+      user: "{{ ansible_env.SUDO_USER }}"
+      clouds:
+        bifrost:
+          config_username: "{{ ironic.keystone.default_username }}"
+          config_password: "{{ ironic.keystone.default_password }}"
+          config_project_name: "baremetal"
+          config_region_name: "{{ keystone.bootstrap.region_name }}"
+          config_auth_url: "{{ keystone.bootstrap.public_url }}"
   environment:
     http_proxy: "{{ lookup('env','http_proxy') }}"
     https_proxy: "{{ lookup('env','https_proxy') }}"
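With the keystone roles in place, bifrost writes a "bifrost" entry into the sudo user's clouds.yaml. A hedged usage sketch, assuming the deployment runs with enable_keystone=true and python-openstackclient is installed:

# Sketch only: inspect the provisioned nodes via the generated cloud entry
export OS_CLOUD=bifrost
openstack baremetal node list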
index d3b28ee..0561962 100755 (executable)
@@ -70,7 +70,6 @@ export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
 # Source Ansible
 set +x +o nounset
 $SCRIPT_HOME/env-setup.sh
-source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
 ANSIBLE=$(which ansible-playbook)
 set -x -o nounset
 
@@ -121,7 +120,8 @@ ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
     -e create_ipa_image=${CREATE_IPA_IMAGE} \
     -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
     -e ipv4_gateway=192.168.122.1 \
-    -e wait_timeout=${PROVISION_WAIT_TIMEOUT}
+    -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
+    -e enable_keystone=false
 EXITCODE=$?
 
 if [ $EXITCODE != 0 ]; then
index d570f10..c75e814 100755 (executable)
@@ -16,6 +16,8 @@ fi
 
 # Start fresh
 rm -rf /opt/stack
+# HOME is normally set by sudo -H
+rm -rf ${HOME}/.config/openstack
 
 # Delete all libvirt VMs and hosts from vbmc (look for a port number)
 for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
index 1f4ad06..538fe17 100644 (file)
@@ -47,7 +47,7 @@
   remote_user: root
   tasks:
     - name: make nfs dir
-      file: "dest=/images mode=777 state=directory"
+      file: "dest=/images mode=0777 state=directory"
    - name: configure service
       shell: "echo 'nfs        2049/tcp' >>  /etc/services && echo 'nfs        2049/udp' >>  /etc/services"
     - name: configure NFS
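The leading zero in mode=0777 matters: a bare 777 can be read as the decimal integer 777, which is octal 1411 and grants permissions nobody intended. A quick way to see the difference:

# Sketch only: decimal 777 is not octal 0777
python -c 'print(oct(777))'    # 01411 on python2, not 0777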
diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports
deleted file mode 100644 (file)
index af64d61..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-#               to NFS clients.  See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
-#
-# glance images are stored on compute host and made available to image hosts via nfs
-# see image_hosts section in openstack_user_config.yml for details
-/images         *(rw,sync,no_subtree_check,no_root_squash)
-
index 3cd1d62..167502c 100644 (file)
@@ -1,37 +1,39 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'controller01': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
+        'MGMT_IP': '172.29.236.12',
+        'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
     'controller02': {
-        'MGMT_IP': '172.29.236.13',
         'VLAN_IP': '192.168.122.5',
+        'MGMT_IP': '172.29.236.13',
+        'VXLAN_IP': '172.29.240.13',
         'STORAGE_IP': '172.29.244.13'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.14',
         'VLAN_IP': '192.168.122.6',
-        'STORAGE_IP': '172.29.244.14',
-        'VLAN_IP_SECOND': '173.29.241.1',
-        'VXLAN_IP': '172.29.240.14'
+        'MGMT_IP': '172.29.236.14',
+        'VXLAN_IP': '172.29.240.14',
+        'STORAGE_IP': '172.29.244.14'
     },
     'compute01': {
-        'MGMT_IP': '172.29.236.15',
         'VLAN_IP': '192.168.122.7',
-        'STORAGE_IP': '172.29.244.15',
-        'VLAN_IP_SECOND': '173.29.241.2',
-        'VXLAN_IP': '172.29.240.15'
+        'MGMT_IP': '172.29.236.15',
+        'VXLAN_IP': '172.29.240.15',
+        'STORAGE_IP': '172.29.244.15'
     }
 }
index 0c43702..09fb734 100644 (file)
@@ -138,7 +138,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -148,7 +148,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -158,7 +158,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -218,28 +218,37 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
   controller01:
     ip: 172.29.236.12
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.12"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
   controller02:
     ip: 172.29.236.13
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.13"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
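All three controllers now point their cinder NFS backend at the /volumes export on compute00's storage address instead of per-controller LVM. A hedged way to verify the export from a controller, assuming the NFS client utilities are installed:

# Sketch only: confirm the share is exported and mountable
showmount -e 172.29.244.14
sudo mount -t nfs 172.29.244.14:/volumes /mnt && sudo umount /mnt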
diff --git a/prototypes/xci/file/install-ansible.sh b/prototypes/xci/file/install-ansible.sh
new file mode 100644 (file)
index 0000000..daa7f51
--- /dev/null
@@ -0,0 +1,136 @@
+#!/bin/bash
+# NOTE(hwoarang): Most parts of this file were taken from the
+# bifrost repository (scripts/install-deps.sh). This script contains all
+# the necessary distro-specific code to install ansible and its dependencies.
+
+set -eu
+
+declare -A PKG_MAP
+
+CHECK_CMD_PKGS=(
+    libffi
+    libopenssl
+    net-tools
+    python-devel
+)
+
+# Check zypper before apt-get in case zypper-aptitude
+# is installed
+if [ -x '/usr/bin/zypper' ]; then
+    OS_FAMILY="Suse"
+    INSTALLER_CMD="sudo -H -E zypper install -y"
+    CHECK_CMD="zypper search --match-exact --installed"
+    PKG_MAP=(
+        [gcc]=gcc
+        [git]=git
+        [libffi]=libffi-devel
+        [libopenssl]=libopenssl-devel
+        [net-tools]=net-tools
+        [python]=python
+        [python-devel]=python-devel
+        [venv]=python-virtualenv
+        [wget]=wget
+    )
+    EXTRA_PKG_DEPS=( python-xml )
+    # NOTE (cinerama): we can't install python without removing this package
+    # if it exists
+    if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
+        sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
+    fi
+elif [ -x '/usr/bin/apt-get' ]; then
+    OS_FAMILY="Debian"
+    INSTALLER_CMD="sudo -H -E apt-get -y install"
+    CHECK_CMD="dpkg -l"
+    PKG_MAP=( [gcc]=gcc
+              [git]=git
+              [libffi]=libffi-dev
+              [libopenssl]=libssl-dev
+              [net-tools]=net-tools
+              [python]=python-minimal
+              [python-devel]=libpython-dev
+              [venv]=python-virtualenv
+              [wget]=wget
+            )
+    EXTRA_PKG_DEPS=()
+elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then
+    OS_FAMILY="RedHat"
+    PKG_MANAGER=$(which dnf || which yum)
+    INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install"
+    CHECK_CMD="rpm -q"
+    PKG_MAP=(
+        [gcc]=gcc
+        [git]=git
+        [libffi]=libffi-devel
+        [libopenssl]=openssl-devel
+        [net-tools]=net-tools
+        [python]=python
+        [python-devel]=python-devel
+        [venv]=python-virtualenv
+        [wget]=wget
+    )
+    EXTRA_PKG_DEPS=()
+else
+    echo "ERROR: Supported package manager not found.  Supported: apt,yum,zypper"
+fi
+
+if ! $(python --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[python]}
+fi
+if ! $(gcc -v &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[gcc]}
+fi
+if ! $(git --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[git]}
+fi
+if ! $(wget --version &>/dev/null); then
+    ${INSTALLER_CMD} ${PKG_MAP[wget]}
+fi
+
+for pkg in ${CHECK_CMD_PKGS[@]}; do
+    if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
+        ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
+    fi
+done
+
+if [ -n "${EXTRA_PKG_DEPS-}" ]; then
+    for pkg in ${EXTRA_PKG_DEPS}; do
+        if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
+            ${INSTALLER_CMD} ${pkg}
+        fi
+    done
+fi
+
+# If we're using a venv, we need to work around sudo not
+# keeping the path even with -E.
+PYTHON=$(which python)
+
+# To install python packages, we need pip.
+#
+# We can't use the apt packaged version of pip since
+# older versions of pip are incompatible with
+# requests, one of our indirect dependencies (bug 1459947).
+#
+# Note(cinerama): We use pip to install an updated pip plus our
+# other python requirements. pip breakages can seriously impact us,
+# so we've chosen to install/upgrade pip here rather than in
+# requirements (which are synced automatically from the global ones)
+# so we can quickly and easily adjust version parameters.
+# See bug 1536627.
+#
+# Note(cinerama): If pip is linked to pip3, the rest of the install
+# won't work. Remove the alternatives. This is due to ansible's
+# python 2.x requirement.
+if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
+    sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
+fi
+
+if ! which pip; then
+    wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+    sudo -H -E ${PYTHON} /tmp/get-pip.py
+fi
+
+PIP=$(which pip)
+
+sudo -H -E ${PIP} install "pip>6.0"
+
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
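The script is meant to be sourced (as xci-deploy.sh does below) so the pip-installed ansible lands on the caller's PATH. A usage sketch; the version pin is illustrative:

# Sketch only: install a pinned ansible on a fresh node
export XCI_ANSIBLE_PIP_VERSION=2.1.5.0   # illustrative value
source file/install-ansible.sh
ansible --version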
diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml
deleted file mode 100644 (file)
index 395f44a..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
-    - role: configure-nfs
index 01fba71..0d446ba 100644 (file)
@@ -1,19 +1,20 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
-        'VLAN_IP_SECOND': '173.29.241.1',
+        'MGMT_IP': '172.29.236.12',
         'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
index 70429ce..f9ccee2 100644 (file)
@@ -160,8 +160,11 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules
deleted file mode 100644 (file)
index 60a517f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
deleted file mode 100644 (file)
index 6dc147f..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute01
-  remote_user: root
-  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
-  roles:
-    - role: configure-nfs
index 7f52d34..3c69a34 100644 (file)
@@ -1,26 +1,26 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
-        'VLAN_IP_SECOND': '173.29.241.1',
+        'MGMT_IP': '172.29.236.12',
         'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
     'compute01': {
-        'MGMT_IP': '172.29.236.13',
         'VLAN_IP': '192.168.122.5',
-        'VLAN_IP_SECOND': '173.29.241.2',
+        'MGMT_IP': '172.29.236.13',
         'VXLAN_IP': '172.29.240.13',
         'STORAGE_IP': '172.29.244.13'
     }
index 05de6a9..fb12655 100644 (file)
@@ -118,7 +118,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.13"
+        - server: "172.29.244.12"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -162,8 +162,11 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
index 2a55964..34b974c 100644 (file)
         path: "{{LOG_PATH}}"
         state: directory
         recurse: no
-    # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts
-    - name: copy multihost playbook
-      copy:
-        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml"
-        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
-      when: XCI_FLAVOR != "aio"
     # when the deployment is aio, we overwrite and use the playbook configure-opnfvhost.yml, since everything gets installed on the opnfv host
     - name: copy aio playbook
       copy:
index 8c794c4..64fcef0 100644 (file)
       shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
     - name: copy cinder.yml
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
-    - name: bootstrap ansible on opnfv host
-      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}"
-    - name: generate password token
-      command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
     # TODO: We need to get rid of this as soon as the issue is fixed upstream
     - name: change the haproxy state from disable to enable
       replace:
         replace: '\1haproxy_state: enabled'
     - name: copy OPNFV OpenStack playbook
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
-    # Copy pinned role requirements if we are running as part of daily CI loop
     - name: copy OPNFV role requirements
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
-      when: XCI_LOOP == "daily"
+    - name: bootstrap ansible on opnfv host
+      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+      args:
+        chdir: "{{OPENSTACK_OSA_PATH}}"
+    - name: generate password token
+      command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
+      args:
+        chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
 - hosts: localhost
   remote_user: root
   tasks:
@@ -6,10 +6,6 @@
       copy:
         src: ../file/authorized_keys
         dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
 
 - hosts: controller
   remote_user: root
@@ -18,7 +14,9 @@
     - ../var/flavor-vars.yml
   roles:
     # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+    - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+    # we need to force a time sync with ntp or the nodes' clocks will drift out of sync
+    - role: synchronize-time
 
 - hosts: compute
   remote_user: root
     - ../var/flavor-vars.yml
   roles:
     # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+    - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+    # we need to force a time sync with ntp or the nodes' clocks will drift out of sync
+    - role: synchronize-time
 
-- hosts: compute01
+- hosts: compute00
   remote_user: root
   # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
   roles:
index 8bc8482..aafadf7 100644 (file)
@@ -8,9 +8,27 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 # TODO: this role needs to be adjusted for different distros
-- name: configure network for {{ ansible_os_family }} on interface {{ interface }}
-  template:
-    src: "{{ src }}"
-    dest: "{{ dest }}"
-- name: restart ubuntu xenial network service
-  shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+- block:
+    - name: configure modules
+      lineinfile:
+        dest: /etc/modules
+        state: present
+        create: yes
+        line: "8021q"
+    - name: add modules
+      modprobe:
+        name: 8021q
+        state: present
+    - name: ensure glean rules are removed
+      file:
+        path: "/etc/udev/rules.d/99-glean.rules"
+        state: absent
+    - name: ensure interfaces.d folder is empty
+      shell: "/bin/rm -rf /etc/network/interfaces.d/*"
+    - name: ensure interfaces file is updated
+      template:
+        src: "{{ src }}"
+        dest: "{{ dest }}"
+    - name: restart network service
+      shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+  when: ansible_distribution_release == "xenial"
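After the role runs on a xenial target, the 8021q module should be loaded and the templated VLAN interfaces up; a quick hedged check:

# Sketch only: verify VLAN support and the rewritten interfaces
lsmod | grep 8021q
ip -d link show | grep -B2 'vlan protocol'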
index b188f4d..c52da0b 100644 (file)
@@ -9,11 +9,14 @@
 ##############################################################################
 # TODO: this is for xenial and needs to be adjusted for different distros
 - block:
-    - name: make NFS dir
+    - name: make NFS directories
       file:
-        dest: /images
-        mode: 777
+        dest: "{{ item }}"
+        mode: 0777
         state: directory
+      with_items:
+        - "/images"
+        - "/volumes"
     - name: configure NFS service
       lineinfile:
         dest: /etc/services
       with_items:
         - "nfs        2049/tcp"
         - "nfs        2049/udp"
-    - name: configure NFS exports on ubuntu xenial
-      copy:
-        src: ../file/exports
+    - name: configure NFS exports
+      lineinfile:
         dest: /etc/exports
-      when: ansible_distribution_release == "xenial"
+        state: present
+        create: yes
+        line: "{{ item }}"
+      with_items:
+        - "/images         *(rw,sync,no_subtree_check,no_root_squash)"
+        - "/volumes        *(rw,sync,no_subtree_check,no_root_squash)"
     # TODO: the service name might be different on other distros and needs to be adjusted
     - name: restart ubuntu xenial NFS service
       service:
diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
new file mode 100644 (file)
index 0000000..5c39d89
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+    - name: restart chrony
+      service:
+        name: chrony
+        state: restarted
+    - name: synchronize time
+      shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+  when: ansible_distribution_release == "xenial"
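A hedged follow-up check that the forced step actually converged:

# Sketch only: system time offset should be near zero after makestep
chronyc tracking | grep 'System time'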
index 0c5147c..094544c 100644 (file)
@@ -1,11 +1,7 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
 
-
 # Physical interface
 auto {{ interface }}
 iface {{ interface }} inet manual
@@ -20,7 +16,7 @@ auto {{ interface }}.30
 iface {{ interface }}.30 inet manual
     vlan-raw-device {{ interface }}
 
-# Storage network VLAN interface (optional)
+# Storage network VLAN interface
 auto {{ interface }}.20
 iface {{ interface }}.20 inet manual
     vlan-raw-device {{ interface }}
@@ -55,6 +51,7 @@ iface br-vlan inet static
     address {{host_info[inventory_hostname].VLAN_IP}}
     netmask 255.255.255.0
     gateway 192.168.122.1
+    dns-nameserver 8.8.8.8 8.8.4.4
     offload-sg off
     # Create veth pair, don't bomb if already exists
     pre-up ip link add br-vlan-veth type veth peer name eth12 || true
@@ -65,17 +62,7 @@ iface br-vlan inet static
     post-down ip link del br-vlan-veth || true
     bridge_ports br-vlan-veth
 
-# Add an additional address to br-vlan
-iface br-vlan inet static
-    # Flat network default gateway
-    # -- This needs to exist somewhere for network reachability
-    # -- from the router namespace for floating IP paths.
-    # -- Putting this here is primarily for tempest to work.
-    address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
-    netmask 255.255.252.0
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
index fbaa8b8..638e78e 100644 (file)
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
     netmask 255.255.252.0
 
 # OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
 auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
     bridge_ports {{ interface }}.30
+    address {{host_info[inventory_hostname].VXLAN_IP}}
+    netmask 255.255.252.0
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
     gateway 192.168.122.1
     dns-nameserver 8.8.8.8 8.8.4.4
 
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
index fbaa8b8..e9f8649 100644 (file)
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
     netmask 255.255.252.0
 
 # OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
 auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
     bridge_ports {{ interface }}.30
+    address {{ host_info[inventory_hostname].VXLAN_IP }}
+    netmask 255.255.252.0
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
     gateway 192.168.122.1
     dns-nameserver 8.8.8.8 8.8.4.4
 
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
index 2fd9be0..718ed73 100755 (executable)
@@ -50,7 +50,7 @@ echo "-------------------------------------------------------------------------"
 #-------------------------------------------------------------------------------
 # Install ansible on localhost
 #-------------------------------------------------------------------------------
-pip install ansible==$XCI_ANSIBLE_PIP_VERSION
+source file/install-ansible.sh
 
 # TODO: The xci playbooks can be put into a playbook which will be done later.
 
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
new file mode 100644 (file)
index 0000000..197e493
--- /dev/null
@@ -0,0 +1,112 @@
+import os
+import yaml
+from opnfv.deployment import factory
+import argparse
+
+
+parser = argparse.ArgumentParser(description='OPNFV POD Info Generator')
+
+parser.add_argument("-t", "--INSTALLER_TYPE", help="Give INSTALLER_TYPE")
+parser.add_argument("-i", "--INSTALLER_IP", help="Give INSTALLER_IP")
+parser.add_argument("-u", "--user", help="Give username of this pod")
+parser.add_argument("-k", "--key", help="Give key file of the user")
+parser.add_argument("-p", "--password", help="Give password of the user")
+parser.add_argument("-f", "--filepath", help="Give dest path of output file")
+args = parser.parse_args()
+
+
+def check_params():
+    """
+    Check all the CLI inputs. INSTALLER_TYPE, INSTALLER_IP, user and the
+    dest path of the output file must all be given.
+    Either key or password must be given as well.
+    """
+    if not args.INSTALLER_TYPE or not args.INSTALLER_IP or not args.user:
+        print("INSTALLER_TYPE, INSTALLER_IP and user are all needed.")
+        return False
+    if not args.key and not args.password:
+        print("key and password are all None. At least providing one.")
+        return False
+    if not args.filepath:
+        print("Must give the dest path of the output file.")
+        return False
+    return True
+
+
+def get_with_key():
+    """
+    Get handler of the nodes info with key file.
+    """
+    return factory.Factory.get_handler(args.INSTALLER_TYPE, args.INSTALLER_IP,
+                                       args.user, pkey_file=args.key)
+
+
+def get_with_passwd():
+    """
+    Get handler of the nodes info with password.
+    """
+    return factory.Factory.get_handler(args.INSTALLER_TYPE, args.INSTALLER_IP,
+                                       args.user, installer_pwd=args.password)
+
+
+def create_file(handler):
+    """
+    Create the yaml file of nodes info.
+    As Yardstick requires, node names must be node1, node2, ..., and node1
+    must be a controller.
+    Compass uses the password of each node.
+    Other installers use the key file of each node.
+    """
+    if not os.path.exists(os.path.dirname(args.filepath)):
+        os.makedirs(os.path.dirname(args.filepath))
+    nodes = handler.nodes
+    node_list = []
+    index = 1
+    for node in nodes:
+        try:
+            if node.roles[0].lower() == "controller":
+                node_info = {'name': "node%s" % index, 'role': node.roles[0],
+                             'ip': node.ip, 'user': 'root'}
+                node_list.append(node_info)
+                index += 1
+        except Exception:
+            node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+                         'user': 'root'}
+            node_list.append(node_info)
+    for node in nodes:
+        try:
+            if node.roles[0].lower() == "compute":
+                node_info = {'name': "node%s" % index, 'role': node.roles[0],
+                             'ip': node.ip, 'user': 'root'}
+                node_list.append(node_info)
+                index += 1
+        except Exception:
+            node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+                         'user': 'root'}
+            node_list.append(node_info)
+    if args.INSTALLER_TYPE == 'compass':
+        for item in node_list:
+            item['password'] = 'root'
+    else:
+        for item in node_list:
+            item['key_filename'] = '/root/.ssh/id_rsa'
+    data = {'nodes': node_list}
+    with open(args.filepath, "w") as fw:
+        yaml.dump(data, fw)
+
+
+def main():
+    if not check_params():
+        return 1
+    if args.key:
+        handler = get_with_key()
+    else:
+        handler = get_with_passwd()
+    if not handler:
+        print("Error: failed to get the node's handler.")
+        return 1
+    create_file(handler)
+
+
+if __name__ == '__main__':
+    main()
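A usage sketch for the new helper; the installer address and credentials are illustrative:

# Sketch only: generate the Yardstick pod file from a compass deployment
python utils/create_pod_file.py -t compass -i 192.168.200.2 \
    -u root -p root -f /home/opnfv/pod.yaml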
index c99afac..6a382a5 100755 (executable)
@@ -138,7 +138,7 @@ elif [ "$installer_type" == "apex" ]; then
     if [ -f /root/.ssh/id_rsa ]; then
         chmod 600 /root/.ssh/id_rsa
     fi
-    sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc $dest_path
+    sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc.v3 $dest_path
 
 elif [ "$installer_type" == "compass" ]; then
     verify_connectivity $installer_ip
index af1d1d8..94e7f2f 100755 (executable)
@@ -9,10 +9,8 @@
 import datetime
 import jinja2
 import os
-import requests
 import sys
 import time
-import yaml
 
 import testCase as tc
 import scenarioResult as sr
@@ -43,9 +41,7 @@ log_level = rp_utils.get_config('general.log.log_level')
 exclude_noha = rp_utils.get_config('functest.exclude_noha')
 exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
 
-response = requests.get(cf)
-
-functest_yaml_config = yaml.safe_load(response.text)
+functest_yaml_config = rp_utils.getFunctestConfig()
 
 logger.info("*******************************************")
 logger.info("*                                         *")
@@ -69,128 +65,117 @@ config_tiers = functest_yaml_config.get("tiers")
 for tier in config_tiers:
     if tier['order'] >= 0 and tier['order'] < 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                testValid.append(tc.TestCase(case['name'],
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
                                              "functest",
                                              case['dependencies']))
     elif tier['order'] == 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                testValid.append(tc.TestCase(case['name'],
-                                             case['name'],
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
+                                             case['case_name'],
                                              case['dependencies']))
     elif tier['order'] > 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                otherTestCases.append(tc.TestCase(case['name'],
+            if case['case_name'] not in blacklist:
+                otherTestCases.append(tc.TestCase(case['case_name'],
                                                   "functest",
                                                   case['dependencies']))
 
 logger.debug("Functest reporting start")
+
 # For all the versions
 for version in versions:
     # For all the installers
+    scenario_directory = "./display/" + version + "/functest/"
+    scenario_file_name = scenario_directory + "scenario_history.txt"
+
+    # check that the directory exists, if not create it
+    # (first run on new version)
+    if not os.path.exists(scenario_directory):
+        os.makedirs(scenario_directory)
+
+    # initiate scenario file if it does not exist
+    if not os.path.isfile(scenario_file_name):
+        with open(scenario_file_name, "a") as my_file:
+            logger.debug("Create scenario file: %s" % scenario_file_name)
+            my_file.write("date,scenario,installer,detail,score\n")
+
     for installer in installers:
+
         # get scenarios
         scenario_results = rp_utils.getScenarios(healthcheck,
                                                  installer,
                                                  version)
-        scenario_stats = rp_utils.getScenarioStats(scenario_results)
-        items = {}
-        scenario_result_criteria = {}
-        scenario_directory = "./display/" + version + "/functest/"
-        scenario_file_name = scenario_directory + "scenario_history.txt"
-
-        # check that the directory exists, if not create it
-        # (first run on new version)
-        if not os.path.exists(scenario_directory):
-            os.makedirs(scenario_directory)
-
-        # initiate scenario file if it does not exist
-        if not os.path.isfile(scenario_file_name):
-            with open(scenario_file_name, "a") as my_file:
-                logger.debug("Create scenario file: %s" % scenario_file_name)
-                my_file.write("date,scenario,installer,detail,score\n")
-
-        # For all the scenarios get results
-        for s, s_result in scenario_results.items():
-            logger.info("---------------------------------")
-            logger.info("installer %s, version %s, scenario %s:" %
-                        (installer, version, s))
-            logger.debug("Scenario results: %s" % s_result)
-
-            # Green or Red light for a given scenario
-            nb_test_runnable_for_this_scenario = 0
-            scenario_score = 0
-            # url of the last jenkins log corresponding to a given
-            # scenario
-            s_url = ""
-            if len(s_result) > 0:
-                build_tag = s_result[len(s_result)-1]['build_tag']
-                logger.debug("Build tag: %s" % build_tag)
-                s_url = rp_utils.getJenkinsUrl(build_tag)
-                if s_url is None:
-                    s_url = "http://testresultS.opnfv.org/reporting"
-                logger.info("last jenkins url: %s" % s_url)
-            testCases2BeDisplayed = []
-            # Check if test case is runnable / installer, scenario
-            # for the test case used for Scenario validation
-            try:
-                # 1) Manage the test cases for the scenario validation
-                # concretely Tiers 0-3
-                for test_case in testValid:
-                    test_case.checkRunnable(installer, s,
-                                            test_case.getConstraints())
-                    logger.debug("testcase %s (%s) is %s" %
-                                 (test_case.getDisplayName(),
-                                  test_case.getName(),
-                                  test_case.isRunnable))
-                    time.sleep(1)
-                    if test_case.isRunnable:
-                        dbName = test_case.getDbName()
-                        name = test_case.getName()
-                        displayName = test_case.getDisplayName()
-                        project = test_case.getProject()
-                        nb_test_runnable_for_this_scenario += 1
-                        logger.info(" Searching results for case %s " %
-                                    (displayName))
-                        result = rp_utils.getResult(dbName, installer,
-                                                    s, version)
-                        # if no result set the value to 0
-                        if result < 0:
-                            result = 0
-                        logger.info(" >>>> Test score = " + str(result))
-                        test_case.setCriteria(result)
-                        test_case.setIsRunnable(True)
-                        testCases2BeDisplayed.append(tc.TestCase(name,
-                                                                 project,
-                                                                 "",
-                                                                 result,
-                                                                 True,
-                                                                 1))
-                        scenario_score = scenario_score + result
-
-                # 2) Manage the test cases for the scenario qualification
-                # concretely Tiers > 3
-                for test_case in otherTestCases:
-                    test_case.checkRunnable(installer, s,
-                                            test_case.getConstraints())
-                    logger.debug("testcase %s (%s) is %s" %
-                                 (test_case.getDisplayName(),
-                                  test_case.getName(),
-                                  test_case.isRunnable))
-                    time.sleep(1)
-                    if test_case.isRunnable:
-                        dbName = test_case.getDbName()
-                        name = test_case.getName()
-                        displayName = test_case.getDisplayName()
-                        project = test_case.getProject()
-                        logger.info(" Searching results for case %s " %
-                                    (displayName))
-                        result = rp_utils.getResult(dbName, installer,
-                                                    s, version)
-                        # at least 1 result for the test
-                        if result > -1:
+
+        # get nb of supported architecture (x86, aarch64)
+        architectures = rp_utils.getArchitectures(scenario_results)
+        logger.info("Supported architectures: {}".format(architectures))
+
+        for architecture in architectures:
+            logger.info("architecture: {}".format(architecture))
+            # Consider only the results for the selected architecture
+            # i.e drop x86 for aarch64 and vice versa
+            filter_results = rp_utils.filterArchitecture(scenario_results,
+                                                         architecture)
+            scenario_stats = rp_utils.getScenarioStats(filter_results)
+            items = {}
+            scenario_result_criteria = {}
+
+            # in case of more than 1 architecture supported
+            # precise the architecture
+            installer_display = installer
+            if (len(architectures) > 1):
+                installer_display = installer + "@" + architecture
+
+            # For all the scenarios get results
+            for s, s_result in filter_results.items():
+                logger.info("---------------------------------")
+                logger.info("installer %s, version %s, scenario %s:" %
+                            (installer, version, s))
+                logger.debug("Scenario results: %s" % s_result)
+
+                # Green or Red light for a given scenario
+                nb_test_runnable_for_this_scenario = 0
+                scenario_score = 0
+                # url of the last jenkins log corresponding to a given
+                # scenario
+                s_url = ""
+                if len(s_result) > 0:
+                    build_tag = s_result[len(s_result)-1]['build_tag']
+                    logger.debug("Build tag: %s" % build_tag)
+                    s_url = rp_utils.getJenkinsUrl(build_tag)
+                    if s_url is None:
+                        s_url = "http://testresultS.opnfv.org/reporting"
+                    logger.info("last jenkins url: %s" % s_url)
+                testCases2BeDisplayed = []
+                # Check if test case is runnable / installer, scenario
+                # for the test case used for Scenario validation
+                try:
+                    # 1) Manage the test cases for the scenario validation
+                    # concretely Tiers 0-3
+                    for test_case in testValid:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            dbName = test_case.getDbName()
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            nb_test_runnable_for_this_scenario += 1
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(dbName, installer,
+                                                        s, version)
+                            # if no result set the value to 0
+                            if result < 0:
+                                result = 0
+                            logger.info(" >>>> Test score = " + str(result))
                             test_case.setCriteria(result)
                             test_case.setIsRunnable(True)
                             testCases2BeDisplayed.append(tc.TestCase(name,
@@ -198,91 +183,127 @@ for version in versions:
                                                                      "",
                                                                      result,
                                                                      True,
-                                                                     4))
-                        else:
-                            logger.debug("No results found")
-
-                    items[s] = testCases2BeDisplayed
-            except:
-                logger.error("Error: installer %s, version %s, scenario %s" %
-                             (installer, version, s))
-                logger.error("No data available: %s " % (sys.exc_info()[0]))
-
-            # **********************************************
-            # Evaluate the results for scenario validation
-            # **********************************************
-            # the validation criteria = nb runnable tests x 3
-            # because each test case = 0,1,2 or 3
-            scenario_criteria = nb_test_runnable_for_this_scenario * 3
-            # if 0 runnable tests set criteria at a high value
-            if scenario_criteria < 1:
-                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
-
-            s_score = str(scenario_score) + "/" + str(scenario_criteria)
-            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
-                                                          scenario_criteria)
-
-            s_status = "KO"
-            if scenario_score < scenario_criteria:
-                logger.info(">>>> scenario not OK, score = %s/%s" %
-                            (scenario_score, scenario_criteria))
+                                                                     1))
+                            scenario_score = scenario_score + result
+
+                    # 2) Manage the test cases for the scenario qualification
+                    # concretely Tiers > 3
+                    for test_case in otherTestCases:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            dbName = test_case.getDbName()
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(dbName, installer,
+                                                        s, version)
+                            # at least 1 result for the test
+                            if result > -1:
+                                test_case.setCriteria(result)
+                                test_case.setIsRunnable(True)
+                                testCases2BeDisplayed.append(tc.TestCase(
+                                    name,
+                                    project,
+                                    "",
+                                    result,
+                                    True,
+                                    4))
+                            else:
+                                logger.debug("No results found")
+
+                        items[s] = testCases2BeDisplayed
+                except:
+                    logger.error("Error: installer %s, version %s, scenario %s"
+                                 % (installer, version, s))
+                    logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+                # **********************************************
+                # Evaluate the results for scenario validation
+                # **********************************************
+                # the validation criteria = nb runnable tests x 3
+                # because each test case = 0,1,2 or 3
+                scenario_criteria = nb_test_runnable_for_this_scenario * 3
+                # if 0 runnable tests set criteria at a high value
+                if scenario_criteria < 1:
+                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
+
+                s_score = str(scenario_score) + "/" + str(scenario_criteria)
+                s_score_percent = rp_utils.getScenarioPercent(
+                    scenario_score,
+                    scenario_criteria)
+
                 s_status = "KO"
-            else:
-                logger.info(">>>>> scenario OK, save the information")
-                s_status = "OK"
-                path_validation_file = ("./display/" + version +
-                                        "/functest/" +
-                                        "validated_scenario_history.txt")
-                with open(path_validation_file, "a") as f:
-                    time_format = "%Y-%m-%d %H:%M"
-                    info = (datetime.datetime.now().strftime(time_format) +
-                            ";" + installer + ";" + s + "\n")
+                if scenario_score < scenario_criteria:
+                    logger.info(">>>> scenario not OK, score = %s/%s" %
+                                (scenario_score, scenario_criteria))
+                    s_status = "KO"
+                else:
+                    logger.info(">>>>> scenario OK, save the information")
+                    s_status = "OK"
+                    path_validation_file = ("./display/" + version +
+                                            "/functest/" +
+                                            "validated_scenario_history.txt")
+                    with open(path_validation_file, "a") as f:
+                        time_format = "%Y-%m-%d %H:%M"
+                        info = (datetime.datetime.now().strftime(time_format) +
+                                ";" + installer_display + ";" + s + "\n")
+                        f.write(info)
+
+                # Save daily results in a file
+                with open(scenario_file_name, "a") as f:
+                    info = (reportingDate + "," + s + "," + installer_display +
+                            "," + s_score + "," +
+                            str(round(s_score_percent)) + "\n")
                     f.write(info)
 
-            # Save daily results in a file
-            with open(scenario_file_name, "a") as f:
-                info = (reportingDate + "," + s + "," + installer +
-                        "," + s_score + "," +
-                        str(round(s_score_percent)) + "\n")
-                f.write(info)
-
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
-                                                            s_score,
-                                                            s_score_percent,
-                                                            s_url)
-            logger.info("--------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(
-            loader=templateLoader, autoescape=True)
-
-        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_stats=scenario_stats,
-                                     scenario_results=scenario_result_criteria,
-                                     items=items,
-                                     installer=installer,
-                                     period=period,
-                                     version=version,
-                                     date=reportingDate)
-
-        with open("./display/" + version +
-                  "/functest/status-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
-
-        logger.info("Manage export CSV & PDF")
-        rp_utils.export_csv(scenario_file_name, installer, version)
-        logger.error("CSV generated...")
-
-        # Generate outputs for export
-        # pdf
-        # TODO Change once web site updated...use the current one
-        # to test pdf production
-        url_pdf = rp_utils.get_config('general.url')
-        pdf_path = ("./display/" + version +
-                    "/functest/status-" + installer + ".html")
-        pdf_doc_name = ("./display/" + version +
-                        "/functest/status-" + installer + ".pdf")
-        rp_utils.export_pdf(pdf_path, pdf_doc_name)
-        logger.info("PDF generated...")
+                scenario_result_criteria[s] = sr.ScenarioResult(
+                    s_status,
+                    s_score,
+                    s_score_percent,
+                    s_url)
+                logger.info("--------------------------")
+
+            templateLoader = jinja2.FileSystemLoader(".")
+            templateEnv = jinja2.Environment(
+                loader=templateLoader, autoescape=True)
+
+            TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
+            template = templateEnv.get_template(TEMPLATE_FILE)
+
+            outputText = template.render(
+                            scenario_stats=scenario_stats,
+                            scenario_results=scenario_result_criteria,
+                            items=items,
+                            installer=installer_display,
+                            period=period,
+                            version=version,
+                            date=reportingDate)
+
+            with open("./display/" + version +
+                      "/functest/status-" +
+                      installer_display + ".html", "wb") as fh:
+                fh.write(outputText)
+
+            logger.info("Manage export CSV & PDF")
+            rp_utils.export_csv(scenario_file_name, installer_display, version)
+            logger.info("CSV generated...")
+
+            # Generate outputs for export
+            # pdf
+            # TODO Change once web site updated...use the current one
+            # to test pdf production
+            url_pdf = rp_utils.get_config('general.url')
+            pdf_path = ("./display/" + version +
+                        "/functest/status-" + installer_display + ".html")
+            pdf_doc_name = ("./display/" + version +
+                            "/functest/status-" + installer_display + ".pdf")
+            rp_utils.export_pdf(pdf_path, pdf_doc_name)
+            logger.info("PDF generated...")
index 52046c3..ebacfd1 100644 (file)
         {% for scenario in scenario_stats.iteritems() -%}
            var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
         {%- endfor %}
-       
+
        // assign success rate to the gauge
        function updateReadings() {
            {% for scenario,iteration in scenario_stats.iteritems() -%}
                gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
             {%- endfor %}
        }
-       updateReadings();                                                                               
+       updateReadings();
         }
-        
+
         // trend line management
-        d3.csv("./scenario_history.csv", function(data) {
+        d3.csv("./scenario_history.txt", function(data) {
        // ***************************************
        // Create the trend line
       {% for scenario,iteration in scenario_stats.iteritems() -%}
-       // for scenario {{scenario}} 
+       // for scenario {{scenario}}
        // Filter results
-        var trend{{loop.index}} = data.filter(function(row) { 
+        var trend{{loop.index}} = data.filter(function(row) {
             return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
        })
-       // Parse the date 
+       // Parse the date
         trend{{loop.index}}.forEach(function(d) {
            d.date = parseDate(d.date);
            d.score = +d.score
@@ -44,7 +44,7 @@
         var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
         // ****************************************
         {%- endfor %}
-    });            
+    });
     if ( !window.isLoaded ) {
         window.addEventListener("load", function() {
                        onDocumentReady();
@@ -61,7 +61,7 @@ $(document).ready(function (){
     });
 })
 </script>
-    
+
   </head>
     <body>
     <div class="container">
@@ -72,8 +72,8 @@ $(document).ready(function (){
             <li class="active"><a href="../../index.html">Home</a></li>
             <li><a href="status-apex.html">Apex</a></li>
             <li><a href="status-compass.html">Compass</a></li>
-            <li><a href="status-daisy.html">Daisy</a></li>
-            <li><a href="status-fuel.html">Fuel</a></li>
+            <li><a href="status-fuel@x86.html">Fuel@x86</a></li>
+            <li><a href="status-fuel@aarch64.html">Fuel@aarch64</a></li>
             <li><a href="status-joid.html">Joid</a></li>
           </ul>
         </nav>
index 6af87c1..1692f48 100644 (file)
@@ -3,7 +3,6 @@ general:
     installers:
         - apex
         - compass
-        - daisy
         - fuel
         - joid
 
@@ -37,7 +36,6 @@ functest:
     blacklist:
         - ovno
         - security_scan
-        - rally_sanity
         - healthcheck
         - odl_netvirt
         - aaa
@@ -45,13 +43,12 @@ functest:
         - orchestra_ims
         - juju_epc
         - orchestra
-        - promise
     max_scenario_criteria: 50
     test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
     log_level: ERROR
     jenkins_url: https://build.opnfv.org/ci/view/functest/job/
-    exclude_noha: "False"
-    exclude_virtual: "False"
+    exclude_noha: False
+    exclude_virtual: False
 
 yardstick:
     test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
index 0eae591..599a938 100644 (file)
@@ -10,6 +10,7 @@ from urllib2 import Request, urlopen, URLError
 import logging
 import json
 import os
+import requests
 import pdfkit
 import yaml
 
@@ -327,6 +328,44 @@ def getScenarioPercent(scenario_score, scenario_criteria):
     return score
 
 
+# *********
+# Functest
+# *********
+def getFunctestConfig(version=""):
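+    # fetch and parse the functest testcases.yaml for the given version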
+    config_file = get_config('functest.test_conf') + version
+    response = requests.get(config_file)
+    return yaml.safe_load(response.text)
+
+
+def getArchitectures(scenario_results):
+    # x86 is always reported; any armband build_tag means aarch64
+    # results are present as well
+    supported_arch = ['x86']
+    if len(scenario_results) > 0:
+        for scenario_result in scenario_results.values():
+            for value in scenario_result:
+                if "armband" in value['build_tag']:
+                    supported_arch.append('aarch64')
+                    return supported_arch
+    return supported_arch
+
+
+def filterArchitecture(results, architecture):
+    filtered_results = {}
+    # do not shadow the outer "results" dict while iterating it
+    for name, result_list in results.items():
+        filtered_values = []
+        for value in result_list:
+            # compare with ==; "is" tests identity, not string equality
+            if architecture == "x86":
+                # drop aarch64 results
+                if "armband" not in value['build_tag']:
+                    filtered_values.append(value)
+            elif architecture == "aarch64":
+                # drop x86 results
+                if "armband" in value['build_tag']:
+                    filtered_values.append(value)
+        if len(filtered_values) > 0:
+            filtered_results[name] = filtered_values
+    return filtered_results
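+
+
+# Usage sketch (variable names are illustrative): the reporting loop can
+# split results per architecture before rendering, as the status pages do
+# for fuel@x86 / fuel@aarch64:
+#     for arch in getArchitectures(scenario_results):
+#         arch_results = filterArchitecture(scenario_results, arch)
+#         installer_display = installer + "@" + arch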
+
+
 # *********
 # Yardstick
 # *********
diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py
new file mode 100644 (file)
index 0000000..be4b1df
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+
+from tornado import web, gen
+
+from opnfv_testapi.common import raises, message
+
+
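+# Each decorator below wraps a handler method: it reads what it needs from
+# the handler (self.json_args, self.table, self._eval_db_find_one) and from
+# its keyword arguments, raises the matching HTTP error on failure and
+# otherwise yields to the wrapped coroutine.
+
+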
+def authenticate(method):
+    @web.asynchronous
+    @gen.coroutine
+    @functools.wraps(method)
+    def wrapper(self, *args, **kwargs):
+        if self.auth:
+            try:
+                token = self.request.headers['X-Auth-Token']
+            except KeyError:
+                raises.Unauthorized(message.unauthorized())
+            query = {'access_token': token}
+            check = yield self._eval_db_find_one(query, 'tokens')
+            if not check:
+                raises.Forbidden(message.invalid_token())
+        ret = yield gen.coroutine(method)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrapper
+
+
+def not_exist(xstep):
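+    # 404 when no document matches kwargs['query']; on success the document
+    # is passed to the wrapped handler as its first positional argument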
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        data = yield self._eval_db_find_one(query)
+        if not data:
+            raises.NotFound(message.not_found(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def no_body(xstep):
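+    # 400 when the request carries no JSON body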
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        if self.json_args is None:
+            raises.BadRequest(message.no_body())
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def miss_fields(xstep):
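+    # 400 when one of kwargs['miss_fields'] is missing or empty in the body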
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        fields = kwargs.get('miss_fields')
+        if fields:
+            for miss in fields:
+                miss_data = self.json_args.get(miss)
+                if miss_data is None or miss_data == '':
+                    raises.BadRequest(message.missing(miss))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def carriers_exist(xstep):
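+    # 403 when one of the (table, query) carriers has no matching document,
+    # e.g. a result posted for a pod/project/testcase that does not exist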
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        carriers = kwargs.get('carriers')
+        if carriers:
+            for table, query in carriers:
+                exist = yield self._eval_db_find_one(query(), table)
+                if not exist:
+                    raises.Forbidden(message.not_found(table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def new_not_exists(xstep):
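+    # 403 when a document matching kwargs['query'] already exists
+    # (duplicate create)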
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        if query:
+            to_data = yield self._eval_db_find_one(query())
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def updated_one_not_exist(xstep):
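+    # 403 when the update would collide with an existing document; an empty
+    # query from _update_query() means there is no conflict to check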
+    @functools.wraps(xstep)
+    def wrap(self, data, *args, **kwargs):
+        db_keys = kwargs.get('db_keys')
+        query = self._update_query(db_keys, data)
+        if query:
+            to_data = yield self._eval_db_find_one(query)
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
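+
+
+# Usage sketch (mirrors GenericApiHandler below): stack the checks on top of
+# a handler method and pass their inputs as keyword arguments, e.g.
+#
+#     @check.authenticate
+#     @check.no_body
+#     @check.miss_fields
+#     def _create(self, **kwargs):
+#         ...
+#
+#     self._create(miss_fields=['name'], query=query)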
index 522bbe7..955fbbe 100644 (file)
 ##############################################################################
 
 from datetime import datetime
-import functools
 import json
 
 from tornado import gen
 from tornado import web
 
 import models
+from opnfv_testapi.common import check
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
 from opnfv_testapi.tornado_swagger import swagger
@@ -73,48 +73,20 @@ class GenericApiHandler(web.RequestHandler):
         cls_data = self.table_cls.from_dict(data)
         return cls_data.format_http()
 
-    def authenticate(method):
-        @web.asynchronous
-        @gen.coroutine
-        @functools.wraps(method)
-        def wrapper(self, *args, **kwargs):
-            if self.auth:
-                try:
-                    token = self.request.headers['X-Auth-Token']
-                except KeyError:
-                    raises.Unauthorized(message.unauthorized())
-                query = {'access_token': token}
-                check = yield self._eval_db_find_one(query, 'tokens')
-                if not check:
-                    raises.Forbidden(message.invalid_token())
-            ret = yield gen.coroutine(method)(self, *args, **kwargs)
-            raise gen.Return(ret)
-        return wrapper
-
-    @authenticate
-    def _create(self, miss_checks, db_checks, **kwargs):
+    @check.authenticate
+    @check.no_body
+    @check.miss_fields
+    @check.carriers_exist
+    @check.new_not_exists
+    def _create(self, **kwargs):
         """
-        :param miss_checks: [miss1, miss2]
-        :param db_checks: [(table, exist, query, error)]
+        :param miss_fields: [miss1, miss2], checked by @check.miss_fields
+        :param carriers: [(table, query)], checked by @check.carriers_exist
         """
-        if self.json_args is None:
-            raises.BadRequest(message.no_body())
-
         data = self.table_cls.from_dict(self.json_args)
-        for miss in miss_checks:
-            miss_data = data.__getattribute__(miss)
-            if miss_data is None or miss_data == '':
-                raises.BadRequest(message.missing(miss))
-
         for k, v in kwargs.iteritems():
             data.__setattr__(k, v)
 
-        for table, exist, query, error in db_checks:
-            check = yield self._eval_db_find_one(query(data), table)
-            if (exist and not check) or (not exist and check):
-                code, msg = error(data)
-                raises.CodeTBD(code, msg)
-
         if self.table != 'results':
             data.creation_date = datetime.now()
         _id = yield self._eval_db(self.table, 'insert', data.format(),
@@ -146,47 +118,27 @@ class GenericApiHandler(web.RequestHandler):
 
     @web.asynchronous
     @gen.coroutine
-    def _get_one(self, query):
-        data = yield self._eval_db_find_one(query)
-        if data is None:
-            raises.NotFound(message.not_found(self.table, query))
+    @check.not_exist
+    def _get_one(self, data, query=None):
         self.finish_request(self.format_data(data))
 
-    @authenticate
-    def _delete(self, query):
-        data = yield self._eval_db_find_one(query)
-        if data is None:
-            raises.NotFound(message.not_found(self.table, query))
-
+    @check.authenticate
+    @check.not_exist
+    def _delete(self, data, query=None):
         yield self._eval_db(self.table, 'remove', query)
         self.finish_request()
 
-    @authenticate
-    def _update(self, query, db_keys):
-        if self.json_args is None:
-            raises.BadRequest(message.no_body())
-
-        # check old data exist
-        from_data = yield self._eval_db_find_one(query)
-        if from_data is None:
-            raises.NotFound(message.not_found(self.table, query))
-
-        data = self.table_cls.from_dict(from_data)
-        # check new data exist
-        equal, new_query = self._update_query(db_keys, data)
-        if not equal:
-            to_data = yield self._eval_db_find_one(new_query)
-            if to_data is not None:
-                raises.Forbidden(message.exist(self.table, new_query))
-
-        # we merge the whole document """
-        edit_request = self._update_requests(data)
-
-        """ Updating the DB """
-        yield self._eval_db(self.table, 'update', query, edit_request,
+    @check.authenticate
+    @check.no_body
+    @check.not_exist
+    @check.updated_one_not_exist
+    def _update(self, data, query=None, **kwargs):
+        data = self.table_cls.from_dict(data)
+        update_req = self._update_requests(data)
+        yield self._eval_db(self.table, 'update', query, update_req,
                             check_keys=False)
-        edit_request['_id'] = str(data._id)
-        self.finish_request(edit_request)
+        update_req['_id'] = str(data._id)
+        self.finish_request(update_req)
 
     def _update_requests(self, data):
         request = dict()
@@ -219,13 +171,13 @@ class GenericApiHandler(web.RequestHandler):
         equal = True
         for key in keys:
             new = self.json_args.get(key)
-            old = data.__getattribute__(key)
+            old = data.get(key)
             if new is None:
                 new = old
             elif new != old:
                 equal = False
             query[key] = new
-        return equal, query
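+        # an empty dict signals "nothing changed", so
+        # check.updated_one_not_exist skips its duplicate lookup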
+        return query if not equal else dict()
 
     def _eval_db(self, table, method, *args, **kwargs):
         exec_collection = self.db.__getattr__(table)
index 2c303c9..e21841d 100644 (file)
@@ -6,10 +6,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
-
 import handlers
-from opnfv_testapi.common import message
 from opnfv_testapi.tornado_swagger import swagger
 import pod_models
 
@@ -43,15 +40,10 @@ class PodCLHandler(GenericPodHandler):
             @raise 403: pod already exists
             @raise 400: body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('pod', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks, db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class PodGURHandler(GenericPodHandler):
@@ -63,9 +55,7 @@ class PodGURHandler(GenericPodHandler):
             @return 200: pod exist
             @raise 404: pod not exist
         """
-        query = dict()
-        query['name'] = pod_name
-        self._get_one(query)
+        self._get_one(query={'name': pod_name})
 
     def delete(self, pod_name):
         """ Remove a POD
index 59e0b88..d79cd3b 100644 (file)
@@ -6,10 +6,8 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
 
 import handlers
-from opnfv_testapi.common import message
 from opnfv_testapi.tornado_swagger import swagger
 import project_models
 
@@ -45,15 +43,10 @@ class ProjectCLHandler(GenericProjectHandler):
             @raise 403: project already exists
             @raise 400:  body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('project', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks, db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class ProjectGURHandler(GenericProjectHandler):
@@ -65,7 +58,7 @@ class ProjectGURHandler(GenericProjectHandler):
             @return 200: project exist
             @raise 404: project not exist
         """
-        self._get_one({'name': project_name})
+        self._get_one(query={'name': project_name})
 
     @swagger.operation(nickname="updateProjectByName")
     def put(self, project_name):
@@ -81,7 +74,7 @@ class ProjectGURHandler(GenericProjectHandler):
         """
         query = {'name': project_name}
         db_keys = ['name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname='deleteProjectByName')
     def delete(self, project_name):
@@ -90,4 +83,4 @@ class ProjectGURHandler(GenericProjectHandler):
             @return 200: delete success
             @raise 404: project not exist
         """
-        self._delete({'name': project_name})
+        self._delete(query={'name': project_name})
index fb5ed9e..214706f 100644 (file)
@@ -8,7 +8,6 @@
 ##############################################################################
 from datetime import datetime
 from datetime import timedelta
-import httplib
 
 from bson import objectid
 
@@ -127,7 +126,9 @@ class ResultsCLHandler(GenericResultHandler):
         if last is not None:
             last = self.get_int('last', last)
 
-        self._list(self.set_query(), sort=[('start_date', -1)], last=last)
+        self._list(query=self.set_query(),
+                   sort=[('start_date', -1)],
+                   last=last)
 
     @swagger.operation(nickname="createTestResult")
     def post(self):
@@ -141,31 +142,21 @@ class ResultsCLHandler(GenericResultHandler):
             @raise 404: pod/project/testcase not exist
             @raise 400: body/pod_name/project_name/case_name not provided
         """
-        def pod_query(data):
-            return {'name': data.pod_name}
+        def pod_query():
+            return {'name': self.json_args.get('pod_name')}
 
-        def pod_error(data):
-            return httplib.FORBIDDEN, message.not_found('pod', data.pod_name)
+        def project_query():
+            return {'name': self.json_args.get('project_name')}
 
-        def project_query(data):
-            return {'name': data.project_name}
+        def testcase_query():
+            return {'project_name': self.json_args.get('project_name'),
+                    'name': self.json_args.get('case_name')}
 
-        def project_error(data):
-            return httplib.FORBIDDEN, message.not_found('project',
-                                                        data.project_name)
-
-        def testcase_query(data):
-            return {'project_name': data.project_name, 'name': data.case_name}
-
-        def testcase_error(data):
-            return httplib.FORBIDDEN, message.not_found('testcase',
-                                                        data.case_name)
-
-        miss_checks = ['pod_name', 'project_name', 'case_name']
-        db_checks = [('pods', True, pod_query, pod_error),
-                     ('projects', True, project_query, project_error),
-                     ('testcases', True, testcase_query, testcase_error)]
-        self._create(miss_checks, db_checks)
+        miss_fields = ['pod_name', 'project_name', 'case_name']
+        carriers = [('pods', pod_query),
+                    ('projects', project_query),
+                    ('testcases', testcase_query)]
+        self._create(miss_fields=miss_fields, carriers=carriers)
 
 
 class ResultsGURHandler(GenericResultHandler):
@@ -179,7 +170,7 @@ class ResultsGURHandler(GenericResultHandler):
         """
         query = dict()
         query["_id"] = objectid.ObjectId(result_id)
-        self._get_one(query)
+        self._get_one(query=query)
 
     @swagger.operation(nickname="updateTestResultById")
     def put(self, result_id):
@@ -195,4 +186,4 @@ class ResultsGURHandler(GenericResultHandler):
         """
         query = {'_id': objectid.ObjectId(result_id)}
         db_keys = []
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
index bad79fd..5d420a5 100644 (file)
@@ -1,5 +1,4 @@
 import functools
-import httplib
 
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
@@ -65,7 +64,7 @@ class ScenariosCLHandler(GenericScenarioHandler):
                 query['installers'] = {'$elemMatch': elem_query}
             return query
 
-        self._list(_set_query())
+        self._list(query=_set_query())
 
     @swagger.operation(nickname="createScenario")
     def post(self):
@@ -79,15 +78,10 @@ class ScenariosCLHandler(GenericScenarioHandler):
             @raise 403: scenario already exists
             @raise 400:  body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('scenario', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks=miss_checks, db_checks=db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class ScenarioGURHandler(GenericScenarioHandler):
@@ -99,7 +93,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
             @return 200: scenario exist
             @raise 404: scenario not exist
         """
-        self._get_one({'name': name})
+        self._get_one(query={'name': name})
         pass
 
     @swagger.operation(nickname="updateScenarioByName")
@@ -116,7 +110,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
         """
         query = {'name': name}
         db_keys = ['name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname="deleteScenarioByName")
     def delete(self, name):
@@ -126,19 +120,16 @@ class ScenarioGURHandler(GenericScenarioHandler):
         @raise 404: scenario not exist:
         """
 
-        query = {'name': name}
-        self._delete(query)
+        self._delete(query={'name': name})
 
     def _update_query(self, keys, data):
         query = dict()
-        equal = True
         if self._is_rename():
             new = self._term.get('name')
-            if data.name != new:
-                equal = False
+            if data.get('name') != new:
                 query['name'] = new
 
-        return equal, query
+        return query
 
     def _update_requests(self, data):
         updates = {
index bc22b74..9399326 100644 (file)
@@ -6,9 +6,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
 
-from opnfv_testapi.common import message
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import testcase_models
 from opnfv_testapi.tornado_swagger import swagger
@@ -32,9 +30,7 @@ class TestcaseCLHandler(GenericTestcaseHandler):
                          empty list is no testcase exist in this project
             @rtype: L{TestCases}
         """
-        query = dict()
-        query['project_name'] = project_name
-        self._list(query)
+        self._list(query={'project_name': project_name})
 
     @swagger.operation(nickname="createTestCase")
     def post(self, project_name):
@@ -49,26 +45,18 @@ class TestcaseCLHandler(GenericTestcaseHandler):
                         or testcase already exists in this project
             @raise 400: body or name not provided
         """
-        def p_query(data):
-            return {'name': data.project_name}
-
-        def tc_query(data):
-            return {
-                'project_name': data.project_name,
-                'name': data.name
-            }
-
-        def p_error(data):
-            return httplib.FORBIDDEN, message.not_found('project',
-                                                        data.project_name)
-
-        def tc_error(data):
-            return httplib.FORBIDDEN, message.exist('testcase', data.name)
+        def project_query():
+            return {'name': project_name}
 
-        miss_checks = ['name']
-        db_checks = [(self.db_projects, True, p_query, p_error),
-                     (self.db_testcases, False, tc_query, tc_error)]
-        self._create(miss_checks, db_checks, project_name=project_name)
+        def testcase_query():
+            return {'project_name': project_name,
+                    'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        carriers = [(self.db_projects, project_query)]
+        self._create(miss_fields=miss_fields,
+                     carriers=carriers,
+                     query=testcase_query,
+                     project_name=project_name)
 
 
 class TestcaseGURHandler(GenericTestcaseHandler):
@@ -84,7 +72,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
         query = dict()
         query['project_name'] = project_name
         query["name"] = case_name
-        self._get_one(query)
+        self._get_one(query=query)
 
     @swagger.operation(nickname="updateTestCaseByName")
     def put(self, project_name, case_name):
@@ -102,7 +90,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
         """
         query = {'project_name': project_name, 'name': case_name}
         db_keys = ['name', 'project_name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname='deleteTestCaseByName')
     def delete(self, project_name, case_name):
@@ -112,4 +100,4 @@ class TestcaseGURHandler(GenericTestcaseHandler):
             @raise 404: testcase not exist
         """
         query = {'project_name': project_name, 'name': case_name}
-        self._delete(query)
+        self._delete(query=query)
index 8cc3c6c..9b8f4b5 100644 (file)
@@ -13,12 +13,13 @@ from opnfv_testapi.tornado_swagger import swagger
 @swagger.model()
 class TestcaseCreateRequest(models.ModelBase):
     def __init__(self, name, url=None, description=None,
-                 tier=None, ci_loop=None, criteria=None,
-                 blocking=None, dependencies=None, run=None,
+                 catalog_description=None, tier=None, ci_loop=None,
+                 criteria=None, blocking=None, dependencies=None, run=None,
                  domains=None, tags=None, version=None):
         self.name = name
         self.url = url
         self.description = description
+        self.catalog_description = catalog_description
         self.tier = tier
         self.ci_loop = ci_loop
         self.criteria = criteria
@@ -34,11 +35,12 @@ class TestcaseCreateRequest(models.ModelBase):
 @swagger.model()
 class TestcaseUpdateRequest(models.ModelBase):
     def __init__(self, name=None, description=None, project_name=None,
-                 tier=None, ci_loop=None, criteria=None,
-                 blocking=None, dependencies=None, run=None,
+                 catalog_description=None, tier=None, ci_loop=None,
+                 criteria=None, blocking=None, dependencies=None, run=None,
                  domains=None, tags=None, version=None, trust=None):
         self.name = name
         self.description = description
+        self.catalog_description = catalog_description
         self.project_name = project_name
         self.tier = tier
         self.ci_loop = ci_loop
@@ -56,14 +58,15 @@ class TestcaseUpdateRequest(models.ModelBase):
 class Testcase(models.ModelBase):
     def __init__(self, _id=None, name=None, project_name=None,
                  description=None, url=None, creation_date=None,
-                 tier=None, ci_loop=None, criteria=None,
-                 blocking=None, dependencies=None, run=None,
+                 catalog_description=None, tier=None, ci_loop=None,
+                 criteria=None, blocking=None, dependencies=None, run=None,
                  domains=None, tags=None, version=None,
                  trust=None):
         self._id = None
         self.name = None
         self.project_name = None
         self.description = None
+        self.catalog_description = None
         self.url = None
         self.creation_date = None
         self.tier = None
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/executor.py b/utils/test/testapi/opnfv_testapi/tests/unit/executor.py
new file mode 100644 (file)
index 0000000..b30c325
--- /dev/null
@@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+import httplib
+
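+# Decorator factories for the unit tests: the wrapped test method only builds
+# and returns the request to send; the decorator performs the HTTP call and
+# the assertions against the expected status/response.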
+
+def create(expected_status, expected_response):
+    def _create(create_request):
+        @functools.wraps(create_request)
+        def wrap(self):
+            request = create_request(self)
+            status, body = self.create(request)
+            # the response status must match the expected one
+            self.assertEqual(expected_status, status)
+            if expected_status == httplib.OK:
+                getattr(self, expected_response)(body)
+            else:
+                self.assertIn(expected_response, body)
+        return wrap
+    return _create
+
+
+def get(expected_status, expected_response):
+    def _get(get_request):
+        @functools.wraps(get_request)
+        def wrap(self):
+            request = get_request(self)
+            status, body = self.get(request)
+            self.assertEqual(expected_status, status)
+            if expected_status == httplib.OK:
+                getattr(self, expected_response)(body)
+            else:
+                self.assertIn(expected_response, body)
+        return wrap
+    return _get
+
+
+def update(expected_status, expected_response):
+    def _update(update_request):
+        @functools.wraps(update_request)
+        def wrap(self):
+            request, resource = update_request(self)
+            status, body = self.update(request, resource)
+            self.assertEqual(expected_status, status)
+            if expected_status == httplib.OK:
+                getattr(self, expected_response)(request, body)
+            else:
+                self.assertIn(expected_response, body)
+        return wrap
+    return _update
+
+
+def delete(expected_status, expected_response):
+    def _delete(delete_request):
+        @functools.wraps(delete_request)
+        def wrap(self):
+            request = delete_request(self)
+            if isinstance(request, tuple):
+                status, body = self.delete(request[0], *(request[1]))
+            else:
+                status, body = self.delete(request)
+            self.assertEqual(expected_status, status)
+            if expected_status == httplib.OK:
+                getattr(self, expected_response)(body)
+            else:
+                self.assertIn(expected_response, body)
+        return wrap
+    return _delete
+
+
+def query(expected_status, expected_response, number=0):
+    def _query(get_request):
+        @functools.wraps(get_request)
+        def wrap(self):
+            request = get_request(self)
+            status, body = self.query(request)
+            self.assertEqual(expected_status, status)
+            if expected_status == httplib.OK:
+                getattr(self, expected_response)(body, number)
+            else:
+                self.assertIn(expected_response, body)
+        return wrap
+    return _query
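+
+
+# Usage sketch (as in the pod/project tests below): the test method returns
+# the request and the decorator drives the call and the check, e.g.
+#
+#     @executor.create(httplib.BAD_REQUEST, message.missing('name'))
+#     def test_emptyName(self):
+#         return pod_models.PodCreateRequest('')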
index b955f4a..a6e7339 100644 (file)
@@ -12,9 +12,9 @@ from os import path
 import mock
 from tornado import testing
 
-import fake_pymongo
 from opnfv_testapi.cmd import server
 from opnfv_testapi.resources import models
+from opnfv_testapi.tests.unit import fake_pymongo
 
 
 class TestBase(testing.AsyncHTTPTestCase):
index 7c43fca..1ebc96f 100644 (file)
@@ -12,7 +12,7 @@ from tornado import gen
 from tornado import testing
 from tornado import web
 
-import fake_pymongo
+from opnfv_testapi.tests.unit import fake_pymongo
 
 
 class MyTest(testing.AsyncHTTPTestCase):
index cae86e8..0ed348d 100644 (file)
@@ -11,7 +11,8 @@ import unittest
 
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import pod_models
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import test_base as base
 
 
 class TestPodBase(base.TestBase):
@@ -36,48 +37,47 @@ class TestPodBase(base.TestBase):
 
 
 class TestPodCreate(TestPodBase):
+    @executor.create(httplib.BAD_REQUEST, message.no_body())
     def test_withoutBody(self):
-        (code, body) = self.create()
-        self.assertEqual(code, httplib.BAD_REQUEST)
+        return None
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_emptyName(self):
-        req_empty = pod_models.PodCreateRequest('')
-        (code, body) = self.create(req_empty)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return pod_models.PodCreateRequest('')
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_noneName(self):
-        req_none = pod_models.PodCreateRequest(None)
-        (code, body) = self.create(req_none)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return pod_models.PodCreateRequest(None)
 
+    @executor.create(httplib.OK, 'assert_create_body')
     def test_success(self):
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.OK)
-        self.assert_create_body(body)
+        return self.req_d
 
+    @executor.create(httplib.FORBIDDEN, message.exist_base)
     def test_alreadyExist(self):
         self.create_d()
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
+        return self.req_d
 
 
 class TestPodGet(TestPodBase):
+    def setUp(self):
+        super(TestPodGet, self).setUp()
+        self.create_d()
+        self.create_e()
+
+    @executor.get(httplib.NOT_FOUND, message.not_found_base)
     def test_notExist(self):
-        code, body = self.get('notExist')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return 'notExist'
 
+    @executor.get(httplib.OK, 'assert_get_body')
     def test_getOne(self):
-        self.create_d()
-        code, body = self.get(self.req_d.name)
-        self.assert_get_body(body)
+        return self.req_d.name
 
+    @executor.get(httplib.OK, '_assert_list')
     def test_list(self):
-        self.create_d()
-        self.create_e()
-        code, body = self.get()
+        return None
+
+    def _assert_list(self, body):
         self.assertEqual(len(body.pods), 2)
         for pod in body.pods:
             if self.req_d.name == pod.name:
index 74cefd7..323a116 100644 (file)
@@ -1,17 +1,10 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corporation
-# feng.xiaowei@zte.com.cn
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
 import httplib
 import unittest
 
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import test_base as base
 
 
 class TestProjectBase(base.TestBase):
@@ -36,49 +29,47 @@ class TestProjectBase(base.TestBase):
 
 
 class TestProjectCreate(TestProjectBase):
+    @executor.create(httplib.BAD_REQUEST, message.no_body())
     def test_withoutBody(self):
-        (code, body) = self.create()
-        self.assertEqual(code, httplib.BAD_REQUEST)
+        return None
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_emptyName(self):
-        req_empty = project_models.ProjectCreateRequest('')
-        (code, body) = self.create(req_empty)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return project_models.ProjectCreateRequest('')
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_noneName(self):
-        req_none = project_models.ProjectCreateRequest(None)
-        (code, body) = self.create(req_none)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return project_models.ProjectCreateRequest(None)
 
+    @executor.create(httplib.OK, 'assert_create_body')
     def test_success(self):
-        (code, body) = self.create_d()
-        self.assertEqual(code, httplib.OK)
-        self.assert_create_body(body)
+        return self.req_d
 
+    @executor.create(httplib.FORBIDDEN, message.exist_base)
     def test_alreadyExist(self):
         self.create_d()
-        (code, body) = self.create_d()
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
+        return self.req_d
 
 
 class TestProjectGet(TestProjectBase):
+    def setUp(self):
+        super(TestProjectGet, self).setUp()
+        self.create_d()
+        self.create_e()
+
+    @executor.get(httplib.NOT_FOUND, message.not_found_base)
     def test_notExist(self):
-        code, body = self.get('notExist')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return 'notExist'
 
+    @executor.get(httplib.OK, 'assert_body')
     def test_getOne(self):
-        self.create_d()
-        code, body = self.get(self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assert_body(body)
+        return self.req_d.name
 
+    @executor.get(httplib.OK, '_assert_list')
     def test_list(self):
-        self.create_d()
-        self.create_e()
-        code, body = self.get()
+        return None
+
+    def _assert_list(self, body):
         for project in body.projects:
             if self.req_d.name == project.name:
                 self.assert_body(project)
@@ -87,54 +78,57 @@ class TestProjectGet(TestProjectBase):
 
 
 class TestProjectUpdate(TestProjectBase):
+    def setUp(self):
+        super(TestProjectUpdate, self).setUp()
+        self.create_d()
+        _, get_res = self.get(self.req_d.name)
+        self.index_d = get_res._id
+        self.create_e()
+
+    @executor.update(httplib.BAD_REQUEST, message.no_body())
     def test_withoutBody(self):
-        code, _ = self.update(None, 'noBody')
-        self.assertEqual(code, httplib.BAD_REQUEST)
+        return None, 'noBody'
 
+    @executor.update(httplib.NOT_FOUND, message.not_found_base)
     def test_notFound(self):
-        code, _ = self.update(self.req_e, 'notFound')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return self.req_e, 'notFound'
 
+    @executor.update(httplib.FORBIDDEN, message.exist_base)
     def test_newNameExist(self):
-        self.create_d()
-        self.create_e()
-        code, body = self.update(self.req_e, self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
+        return self.req_e, self.req_d.name
 
+    @executor.update(httplib.FORBIDDEN, message.no_update())
     def test_noUpdate(self):
-        self.create_d()
-        code, body = self.update(self.req_d, self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.no_update(), body)
+        return self.req_d, self.req_d.name
 
+    @executor.update(httplib.OK, '_assert_update')
     def test_success(self):
-        self.create_d()
-        code, body = self.get(self.req_d.name)
-        _id = body._id
-
         req = project_models.ProjectUpdateRequest('newName', 'new description')
-        code, body = self.update(req, self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assertEqual(_id, body._id)
-        self.assert_body(body, req)
+        return req, self.req_d.name
 
+    def _assert_update(self, req, body):
+        self.assertEqual(self.index_d, body._id)
+        self.assert_body(body, req)
         _, new_body = self.get(req.name)
-        self.assertEqual(_id, new_body._id)
+        self.assertEqual(self.index_d, new_body._id)
         self.assert_body(new_body, req)
 
 
 class TestProjectDelete(TestProjectBase):
+    def setUp(self):
+        super(TestProjectDelete, self).setUp()
+        self.create_d()
+
+    @executor.delete(httplib.NOT_FOUND, message.not_found_base)
     def test_notFound(self):
-        code, body = self.delete('notFound')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return 'notFound'
 
+    @executor.delete(httplib.OK, '_assert_delete')
     def test_success(self):
-        self.create_d()
-        code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assertEqual(body, '')
+        return self.req_d.name
 
+    def _assert_delete(self, body):
+        self.assertEqual(body, '')
         code, body = self.get(self.req_d.name)
         self.assertEqual(code, httplib.NOT_FOUND)
 
index 2e0aa36..ef2ce30 100644 (file)
@@ -16,7 +16,8 @@ from opnfv_testapi.resources import pod_models
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.resources import result_models
 from opnfv_testapi.resources import testcase_models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit import executor
 
 
 class Details(object):
@@ -99,8 +100,7 @@ class TestResultBase(base.TestBase):
                          self.req_testcase,
                          self.project)
 
-    def assert_res(self, code, result, req=None):
-        self.assertEqual(code, httplib.OK)
+    def assert_res(self, result, req=None):
         if req is None:
             req = self.req_d
         self.assertEqual(result.pod_name, req.pod_name)
@@ -133,65 +133,57 @@ class TestResultBase(base.TestBase):
 
 
 class TestResultCreate(TestResultBase):
+    @executor.create(httplib.BAD_REQUEST, message.no_body())
     def test_nobody(self):
-        (code, body) = self.create(None)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.no_body(), body)
+        return None
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('pod_name'))
     def test_podNotProvided(self):
         req = self.req_d
         req.pod_name = None
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('pod_name'), body)
+        return req
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('project_name'))
     def test_projectNotProvided(self):
         req = self.req_d
         req.project_name = None
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('project_name'), body)
+        return req
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('case_name'))
     def test_testcaseNotProvided(self):
         req = self.req_d
         req.case_name = None
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('case_name'), body)
+        return req
 
+    @executor.create(httplib.FORBIDDEN, message.not_found_base)
     def test_noPod(self):
         req = self.req_d
         req.pod_name = 'notExistPod'
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.not_found_base, body)
+        return req
 
+    @executor.create(httplib.FORBIDDEN, message.not_found_base)
     def test_noProject(self):
         req = self.req_d
         req.project_name = 'notExistProject'
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.not_found_base, body)
+        return req
 
+    @executor.create(httplib.FORBIDDEN, message.not_found_base)
     def test_noTestcase(self):
         req = self.req_d
         req.case_name = 'notExistTestcase'
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.not_found_base, body)
+        return req
 
+    @executor.create(httplib.OK, 'assert_href')
     def test_success(self):
-        (code, body) = self.create_d()
-        self.assertEqual(code, httplib.OK)
-        self.assert_href(body)
+        return self.req_d
 
+    @executor.create(httplib.OK, 'assert_href')
     def test_key_with_doc(self):
         req = copy.deepcopy(self.req_d)
         req.details = {'1.name': 'dot_name'}
-        (code, body) = self.create(req)
-        self.assertEqual(code, httplib.OK)
-        self.assert_href(body)
+        return req
 
+    @executor.create(httplib.OK, '_assert_no_ti')
     def test_no_ti(self):
         req = result_models.ResultCreateRequest(pod_name=self.pod,
                                                 project_name=self.project,
@@ -204,106 +196,110 @@ class TestResultCreate(TestResultBase):
                                                 build_tag=self.build_tag,
                                                 scenario=self.scenario,
                                                 criteria=self.criteria)
-        (code, res) = self.create(req)
-        _id = res.href.split('/')[-1]
-        self.assertEqual(code, httplib.OK)
+        self.actual_req = req
+        return req
+
+    def _assert_no_ti(self, body):
+        _id = body.href.split('/')[-1]
         code, body = self.get(_id)
-        self.assert_res(code, body, req)
+        self.assert_res(body, self.actual_req)
 
 
 class TestResultGet(TestResultBase):
+    def setUp(self):
+        super(TestResultGet, self).setUp()
+        self.req_d_id = self._create_d()
+        self.req_10d_later = self._create_changed_date(days=10)
+        self.req_10d_before = self._create_changed_date(days=-10)
+
+    @executor.get(httplib.OK, 'assert_res')
     def test_getOne(self):
-        _id = self._create_d()
-        code, body = self.get(_id)
-        self.assert_res(code, body)
+        return self.req_d_id
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryPod(self):
-        self._query_and_assert(self._set_query('pod'))
+        return self._set_query('pod')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryProject(self):
-        self._query_and_assert(self._set_query('project'))
+        return self._set_query('project')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryTestcase(self):
-        self._query_and_assert(self._set_query('case'))
+        return self._set_query('case')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryVersion(self):
-        self._query_and_assert(self._set_query('version'))
+        return self._set_query('version')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryInstaller(self):
-        self._query_and_assert(self._set_query('installer'))
+        return self._set_query('installer')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryBuildTag(self):
-        self._query_and_assert(self._set_query('build_tag'))
+        return self._set_query('build_tag')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryScenario(self):
-        self._query_and_assert(self._set_query('scenario'))
+        return self._set_query('scenario')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryTrustIndicator(self):
-        self._query_and_assert(self._set_query('trust_indicator'))
+        return self._set_query('trust_indicator')
 
+    @executor.query(httplib.OK, '_query_success', 3)
     def test_queryCriteria(self):
-        self._query_and_assert(self._set_query('criteria'))
+        return self._set_query('criteria')
 
+    @executor.query(httplib.BAD_REQUEST, message.must_int('period'))
     def test_queryPeriodNotInt(self):
-        code, body = self.query(self._set_query('period=a'))
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn('period must be int', body)
-
-    def test_queryPeriodFail(self):
-        self._query_and_assert(self._set_query('period=1'),
-                               found=False, days=-10)
+        return self._set_query('period=a')
 
+    @executor.query(httplib.OK, '_query_last_one', 1)
     def test_queryPeriodSuccess(self):
-        self._query_and_assert(self._set_query('period=1'),
-                               found=True)
+        return self._set_query('period=1')
 
+    @executor.query(httplib.BAD_REQUEST, message.must_int('last'))
     def test_queryLastNotInt(self):
-        code, body = self.query(self._set_query('last=a'))
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn('last must be int', body)
+        return self._set_query('last=a')
 
+    @executor.query(httplib.OK, '_query_last_one', 1)
     def test_queryLast(self):
-        self._create_changed_date()
-        req = self._create_changed_date(minutes=20)
-        self._create_changed_date(minutes=-20)
-        self._query_and_assert(self._set_query('last=1'), req=req)
+        return self._set_query('last=1')
 
+    @executor.query(httplib.OK, '_query_last_one', 1)
     def test_combination(self):
-        self._query_and_assert(self._set_query('pod',
-                                               'project',
-                                               'case',
-                                               'version',
-                                               'installer',
-                                               'build_tag',
-                                               'scenario',
-                                               'trust_indicator',
-                                               'criteria',
-                                               'period=1'))
-
+        return self._set_query('pod',
+                               'project',
+                               'case',
+                               'version',
+                               'installer',
+                               'build_tag',
+                               'scenario',
+                               'trust_indicator',
+                               'criteria',
+                               'period=1')
+
+    @executor.query(httplib.OK, '_query_success', 0)
     def test_notFound(self):
-        self._query_and_assert(self._set_query('pod=notExistPod',
-                                               'project',
-                                               'case',
-                                               'version',
-                                               'installer',
-                                               'build_tag',
-                                               'scenario',
-                                               'trust_indicator',
-                                               'criteria',
-                                               'period=1'),
-                               found=False)
-
-    def _query_and_assert(self, query, found=True, req=None, **kwargs):
-        if req is None:
-            req = self._create_changed_date(**kwargs)
-        code, body = self.query(query)
-        if not found:
-            self.assertEqual(code, httplib.OK)
-            self.assertEqual(0, len(body.results))
-        else:
-            self.assertEqual(1, len(body.results))
-            for result in body.results:
-                self.assert_res(code, result, req)
+        return self._set_query('pod=notExistPod',
+                               'project',
+                               'case',
+                               'version',
+                               'installer',
+                               'build_tag',
+                               'scenario',
+                               'trust_indicator',
+                               'criteria',
+                               'period=1')
+
+    def _query_success(self, body, number):
+        self.assertEqual(number, len(body.results))
+
+    def _query_last_one(self, body, number):
+        self.assertEqual(number, len(body.results))
+        self.assert_res(body.results[0], self.req_10d_later)
 
     def _create_changed_date(self, **kwargs):
         req = copy.deepcopy(self.req_d)
@@ -327,9 +323,12 @@ class TestResultGet(TestResultBase):
 
 
 class TestResultUpdate(TestResultBase):
-    def test_success(self):
-        _id = self._create_d()
+    def setUp(self):
+        super(TestResultUpdate, self).setUp()
+        self.req_d_id = self._create_d()
 
+    @executor.update(httplib.OK, '_assert_update_ti')
+    def test_success(self):
         new_ti = copy.deepcopy(self.trust_indicator)
         new_ti.current += self.update_step
         new_ti.histories.append(
@@ -337,13 +336,16 @@ class TestResultUpdate(TestResultBase):
         new_data = copy.deepcopy(self.req_d)
         new_data.trust_indicator = new_ti
         update = result_models.ResultUpdateRequest(trust_indicator=new_ti)
-        code, body = self.update(update, _id)
-        self.assertEqual(_id, body._id)
-        self.assert_res(code, body, new_data)
+        self.update_req = new_data
+        return update, self.req_d_id
 
-        code, new_body = self.get(_id)
-        self.assertEqual(_id, new_body._id)
-        self.assert_res(code, new_body, new_data)
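+    # Checker for executor.update: 'request' carries the trust_indicator that
+    # was sent, 'body' is the parsed response; the guard below tolerates an
+    # empty history list.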
+    def _assert_update_ti(self, request, body):
+        ti = body.trust_indicator
+        self.assertEqual(ti.current, request.trust_indicator.current)
+        if ti.histories:
+            history = ti.histories[0]
+            self.assertEqual(history.date, self.update_date)
+            self.assertEqual(history.step, self.update_step)
 
 
 if __name__ == '__main__':
index f2291a5..b232bc1 100644 (file)
@@ -7,7 +7,7 @@ import os
 
 from opnfv_testapi.common import message
 import opnfv_testapi.resources.scenario_models as models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
 
 
 class TestScenarioBase(base.TestBase):
index 62d0fa0..e28eaf5 100644 (file)
@@ -13,7 +13,8 @@ import unittest
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.resources import testcase_models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit import executor
 
 
 class TestCaseBase(base.TestBase):
@@ -70,6 +71,9 @@ class TestCaseBase(base.TestBase):
     def get(self, case=None):
         return super(TestCaseBase, self).get(self.project, case)
 
+    def create(self, req=None, *args):
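+        # Extra positional args are deliberately ignored; creation is always
+        # scoped to self.project.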
+        return super(TestCaseBase, self).create(req, self.project)
+
     def update(self, new=None, case=None):
         return super(TestCaseBase, self).update(new, self.project, case)
 
@@ -78,54 +82,57 @@ class TestCaseBase(base.TestBase):
 
 
 class TestCaseCreate(TestCaseBase):
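+    # Convention inferred from use: executor.create(code, expected) asserts the
+    # HTTP code, then either checks that 'expected' appears in the error body
+    # (when it is a message) or calls it as a checker method (when it is a name).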
+    @executor.create(httplib.BAD_REQUEST, message.no_body())
     def test_noBody(self):
-        (code, body) = self.create(None, 'vping')
-        self.assertEqual(code, httplib.BAD_REQUEST)
+        return None
 
+    @executor.create(httplib.FORBIDDEN, message.not_found_base)
     def test_noProject(self):
-        code, body = self.create(self.req_d, 'noProject')
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.not_found_base, body)
+        self.project = 'noProject'
+        return self.req_d
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_emptyName(self):
         req_empty = testcase_models.TestcaseCreateRequest('')
-        (code, body) = self.create(req_empty, self.project)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return req_empty
 
+    @executor.create(httplib.BAD_REQUEST, message.missing('name'))
     def test_noneName(self):
         req_none = testcase_models.TestcaseCreateRequest(None)
-        (code, body) = self.create(req_none, self.project)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
+        return req_none
 
+    @executor.create(httplib.OK, '_assert_success')
     def test_success(self):
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.OK)
-        self.assert_create_body(body, None, self.project)
+        return self.req_d
+
+    def _assert_success(self, body):
+        self.assert_create_body(body, self.req_d, self.project)
 
+    @executor.create(httplib.FORBIDDEN, message.exist_base)
     def test_alreadyExist(self):
         self.create_d()
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
+        return self.req_d
 
 
 class TestCaseGet(TestCaseBase):
+    def setUp(self):
+        super(TestCaseGet, self).setUp()
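+        # Both fixture testcases are created for every test in this class.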
+        self.create_d()
+        self.create_e()
+
+    @executor.get(httplib.NOT_FOUND, message.not_found_base)
     def test_notExist(self):
-        code, body = self.get('notExist')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return 'notExist'
 
+    @executor.get(httplib.OK, 'assert_body')
     def test_getOne(self):
-        self.create_d()
-        code, body = self.get(self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assert_body(body)
+        return self.req_d.name
 
+    @executor.get(httplib.OK, '_list')
     def test_list(self):
-        self.create_d()
-        self.create_e()
-        code, body = self.get()
+        return None
+
+    def _list(self, body):
         for case in body.testcases:
             if self.req_d.name == case.name:
                 self.assert_body(case)
@@ -134,60 +141,58 @@ class TestCaseGet(TestCaseBase):
 
 
 class TestCaseUpdate(TestCaseBase):
+    def setUp(self):
+        super(TestCaseUpdate, self).setUp()
+        self.create_d()
+
+    @executor.update(httplib.BAD_REQUEST, message.no_body())
     def test_noBody(self):
-        code, _ = self.update(case='noBody')
-        self.assertEqual(code, httplib.BAD_REQUEST)
+        return None, 'noBody'
 
+    @executor.update(httplib.NOT_FOUND, message.not_found_base)
     def test_notFound(self):
-        code, _ = self.update(self.update_e, 'notFound')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return self.update_e, 'notFound'
 
+    @executor.update(httplib.FORBIDDEN, message.exist_base)
     def test_newNameExist(self):
-        self.create_d()
         self.create_e()
-        code, body = self.update(self.update_e, self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
+        return self.update_e, self.req_d.name
 
+    @executor.update(httplib.FORBIDDEN, message.no_update())
     def test_noUpdate(self):
-        self.create_d()
-        code, body = self.update(self.update_d, self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.no_update(), body)
+        return self.update_d, self.req_d.name
 
+    @executor.update(httplib.OK, '_update_success')
     def test_success(self):
-        self.create_d()
-        code, body = self.get(self.req_d.name)
-        _id = body._id
-
-        code, body = self.update(self.update_e, self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assertEqual(_id, body._id)
-        self.assert_update_body(self.req_d, body, self.update_e)
-
-        _, new_body = self.get(self.req_e.name)
-        self.assertEqual(_id, new_body._id)
-        self.assert_update_body(self.req_d, new_body, self.update_e)
+        return self.update_e, self.req_d.name
 
+    @executor.update(httplib.OK, '_update_success')
     def test_with_dollar(self):
-        self.create_d()
         update = copy.deepcopy(self.update_d)
         update.description = {'2. change': 'dollar change'}
-        code, body = self.update(update, self.req_d.name)
-        self.assertEqual(code, httplib.OK)
+        return update, self.req_d.name
+
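+    # Shared checker: validates the update response, then re-reads the testcase
+    # to confirm the change was persisted.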
+    def _update_success(self, request, body):
+        self.assert_update_body(self.req_d, body, request)
+        _, new_body = self.get(request.name)
+        self.assert_update_body(self.req_d, new_body, request)
 
 
 class TestCaseDelete(TestCaseBase):
+    def setUp(self):
+        super(TestCaseDelete, self).setUp()
+        self.create_d()
+
+    @executor.delete(httplib.NOT_FOUND, message.not_found_base)
     def test_notFound(self):
-        code, body = self.delete('notFound')
-        self.assertEqual(code, httplib.NOT_FOUND)
+        return 'notFound'
 
+    @executor.delete(httplib.OK, '_delete_success')
     def test_success(self):
-        self.create_d()
-        code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, httplib.OK)
-        self.assertEqual(body, '')
+        return self.req_d.name
 
+    def _delete_success(self, body):
+        self.assertEqual(body, '')
         code, body = self.get(self.req_d.name)
         self.assertEqual(code, httplib.NOT_FOUND)
 
index ed3eda0..ca247a3 100644 (file)
@@ -8,11 +8,12 @@ import unittest
 
 from tornado import web
 
-import fake_pymongo
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.router import url_mappings
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import fake_pymongo
+from opnfv_testapi.tests.unit import test_base as base
 
 
 class TestToken(base.TestBase):
@@ -32,22 +33,24 @@ class TestTokenCreateProject(TestToken):
         fake_pymongo.tokens.insert({"access_token": "12345"})
         self.basePath = '/api/v1/projects'
 
+    @executor.create(httplib.FORBIDDEN, message.invalid_token())
     def test_projectCreateTokenInvalid(self):
         self.headers['X-Auth-Token'] = '1234'
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.invalid_token(), body)
+        return self.req_d
 
+    @executor.create(httplib.UNAUTHORIZED, message.unauthorized())
     def test_projectCreateTokenUnauthorized(self):
-        self.headers.pop('X-Auth-Token')
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.UNAUTHORIZED)
-        self.assertIn(message.unauthorized(), body)
+        if 'X-Auth-Token' in self.headers:
+            self.headers.pop('X-Auth-Token')
+        return self.req_d
 
+    @executor.create(httplib.OK, '_create_success')
     def test_projectCreateTokenSuccess(self):
         self.headers['X-Auth-Token'] = '12345'
-        code, body = self.create_d()
-        self.assertEqual(code, httplib.OK)
+        return self.req_d
+
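+    # With a valid token the create succeeds; the body should parse into a
+    # CreateResponse model.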
+    def _create_success(self, body):
+        self.assertIn('CreateResponse', str(type(body)))
 
 
 class TestTokenDeleteProject(TestToken):
@@ -56,28 +59,25 @@ class TestTokenDeleteProject(TestToken):
         self.req_d = project_models.ProjectCreateRequest('vping')
         fake_pymongo.tokens.insert({"access_token": "12345"})
         self.basePath = '/api/v1/projects'
-
-    def test_projectDeleteTokenIvalid(self):
         self.headers['X-Auth-Token'] = '12345'
         self.create_d()
+
+    @executor.delete(httplib.FORBIDDEN, message.invalid_token())
+    def test_projectDeleteTokenInvalid(self):
         self.headers['X-Auth-Token'] = '1234'
-        code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.invalid_token(), body)
+        return self.req_d.name
 
+    @executor.delete(httplib.UNAUTHORIZED, message.unauthorized())
     def test_projectDeleteTokenUnauthorized(self):
-        self.headers['X-Auth-Token'] = '12345'
-        self.create_d()
         self.headers.pop('X-Auth-Token')
-        code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, httplib.UNAUTHORIZED)
-        self.assertIn(message.unauthorized(), body)
+        return self.req_d.name
 
+    @executor.delete(httplib.OK, '_delete_success')
     def test_projectDeleteTokenSuccess(self):
-        self.headers['X-Auth-Token'] = '12345'
-        self.create_d()
-        code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, httplib.OK)
+        return self.req_d.name
+
+    def _delete_success(self, body):
+        self.assertEqual('', body)
 
 
 class TestTokenUpdateProject(TestToken):
@@ -86,34 +86,28 @@ class TestTokenUpdateProject(TestToken):
         self.req_d = project_models.ProjectCreateRequest('vping')
         fake_pymongo.tokens.insert({"access_token": "12345"})
         self.basePath = '/api/v1/projects'
-
-    def test_projectUpdateTokenIvalid(self):
         self.headers['X-Auth-Token'] = '12345'
         self.create_d()
-        code, body = self.get(self.req_d.name)
+
+    @executor.update(httplib.FORBIDDEN, message.invalid_token())
+    def test_projectUpdateTokenInvalid(self):
         self.headers['X-Auth-Token'] = '1234'
         req = project_models.ProjectUpdateRequest('newName', 'new description')
-        code, body = self.update(req, self.req_d.name)
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.invalid_token(), body)
+        return req, self.req_d.name
 
+    @executor.update(httplib.UNAUTHORIZED, message.unauthorized())
     def test_projectUpdateTokenUnauthorized(self):
-        self.headers['X-Auth-Token'] = '12345'
-        self.create_d()
-        code, body = self.get(self.req_d.name)
         self.headers.pop('X-Auth-Token')
         req = project_models.ProjectUpdateRequest('newName', 'new description')
-        code, body = self.update(req, self.req_d.name)
-        self.assertEqual(code, httplib.UNAUTHORIZED)
-        self.assertIn(message.unauthorized(), body)
+        return req, self.req_d.name
 
+    @executor.update(httplib.OK, '_update_success')
     def test_projectUpdateTokenSuccess(self):
-        self.headers['X-Auth-Token'] = '12345'
-        self.create_d()
-        code, body = self.get(self.req_d.name)
         req = project_models.ProjectUpdateRequest('newName', 'new description')
-        code, body = self.update(req, self.req_d.name)
-        self.assertEqual(code, httplib.OK)
+        return req, self.req_d.name
+
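+    # The response to a successful update should echo the new project name.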
+    def _update_success(self, request, body):
+        self.assertIn(request.name, body)
+
 
 if __name__ == '__main__':
     unittest.main()
index c8f3f50..fff802a 100644 (file)
@@ -6,10 +6,12 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+import httplib
 import unittest
 
 from opnfv_testapi.resources import models
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import test_base as base
 
 
 class TestVersionBase(base.TestBase):
@@ -20,12 +22,15 @@ class TestVersionBase(base.TestBase):
 
 
 class TestVersion(TestVersionBase):
+    @executor.get(httplib.OK, '_get_success')
     def test_success(self):
-        code, body = self.get()
-        self.assertEqual(200, code)
+        return None
+
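+    # The version endpoint should report exactly one version, v1.0.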
+    def _get_success(self, body):
         self.assertEqual(len(body.versions), 1)
         self.assertEqual(body.versions[0].version, 'v1.0')
         self.assertEqual(body.versions[0].description, 'basics')
 
+
 if __name__ == '__main__':
     unittest.main()